ABI signature files shipped in the tdb-1.4.2 source tree, one file per release; each entry names an exported symbol and gives its type, as used by the library's ABI checks.

tdb-1.4.2/ABI/tdb-1.2.1.sigs:

tdb_add_flags: void (struct tdb_context *, unsigned int)
tdb_alloc_read: unsigned char *(struct tdb_context *, tdb_off_t, tdb_len_t)
tdb_allocate: tdb_off_t (struct tdb_context *, tdb_len_t, struct tdb_record *)
tdb_allrecord_lock: int (struct tdb_context *, int, enum tdb_lock_flags, bool)
tdb_allrecord_unlock: int (struct tdb_context *, int, bool)
tdb_allrecord_upgrade: int (struct tdb_context *)
tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
tdb_brlock: int (struct tdb_context *, int, tdb_off_t, size_t, enum tdb_lock_flags)
tdb_brunlock: int (struct tdb_context *, int, tdb_off_t, size_t)
tdb_chainlock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_close: int (struct tdb_context *)
tdb_convert: void *(void *, uint32_t)
tdb_delete: int (struct tdb_context *, TDB_DATA)
tdb_do_delete: int (struct tdb_context *, tdb_off_t, struct tdb_record *)
tdb_dump_all: void (struct tdb_context *)
tdb_enable_seqnum: void (struct tdb_context *)
tdb_error: enum TDB_ERROR (struct tdb_context *)
tdb_errorstr: const char *(struct tdb_context *)
tdb_exists: int (struct tdb_context *, TDB_DATA)
tdb_expand: int (struct tdb_context *, tdb_off_t)
tdb_fd: int (struct tdb_context *)
tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_find_lock_hash: tdb_off_t (struct tdb_context *, TDB_DATA, uint32_t, int, struct tdb_record *)
tdb_firstkey: TDB_DATA (struct tdb_context *)
tdb_free: int (struct tdb_context *, tdb_off_t, struct tdb_record *)
tdb_freelist_size: int (struct tdb_context *)
tdb_get_flags: int (struct tdb_context *)
tdb_get_logging_private: void *(struct tdb_context *)
tdb_get_seqnum: int (struct tdb_context *)
tdb_hash_size: int (struct tdb_context *)
tdb_have_extra_locks: bool (struct tdb_context *)
tdb_increment_seqnum_nonblock: void (struct tdb_context *)
tdb_io_init: void (struct tdb_context *)
tdb_lock: int (struct tdb_context *, int, int)
tdb_lock_nonblock: int (struct tdb_context *, int, int)
tdb_lock_record: int (struct tdb_context *, tdb_off_t)
tdb_lockall: int (struct tdb_context *)
tdb_lockall_mark: int (struct tdb_context *)
tdb_lockall_nonblock: int (struct tdb_context *)
tdb_lockall_read: int (struct tdb_context *)
tdb_lockall_read_nonblock: int (struct tdb_context *)
tdb_lockall_unmark: int (struct tdb_context *)
tdb_log_fn: tdb_log_func (struct tdb_context *)
tdb_map_size: size_t (struct tdb_context *)
tdb_mmap: void (struct tdb_context *)
tdb_munmap: int (struct tdb_context *)
tdb_name: const char *(struct tdb_context *)
tdb_needs_recovery: bool (struct tdb_context *)
tdb_nest_lock: int (struct tdb_context *, uint32_t, int, enum tdb_lock_flags)
tdb_nest_unlock: int (struct tdb_context *, uint32_t, int, bool)
tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_ofs_read: int (struct tdb_context *, tdb_off_t, tdb_off_t *)
tdb_ofs_write: int (struct tdb_context *, tdb_off_t, tdb_off_t *)
tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
tdb_parse_data: int (struct tdb_context *, TDB_DATA, tdb_off_t, tdb_len_t, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_printfreelist: int (struct tdb_context *)
tdb_rec_free_read: int (struct tdb_context *, tdb_off_t, struct tdb_record *)
tdb_rec_read: int (struct tdb_context *, tdb_off_t, struct tdb_record *)
tdb_rec_write: int (struct tdb_context *, tdb_off_t, struct tdb_record *)
tdb_release_transaction_locks: void (struct tdb_context *)
tdb_remove_flags: void (struct tdb_context *, unsigned int)
tdb_reopen: int (struct tdb_context *)
tdb_reopen_all: int (int)
tdb_repack: int (struct tdb_context *)
tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
tdb_set_max_dead: void (struct tdb_context *, int)
tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
tdb_transaction_cancel: int (struct tdb_context *)
tdb_transaction_commit: int (struct tdb_context *)
tdb_transaction_lock: int (struct tdb_context *, int, enum tdb_lock_flags)
tdb_transaction_prepare_commit: int (struct tdb_context *)
tdb_transaction_recover: int (struct tdb_context *)
tdb_transaction_start: int (struct tdb_context *)
tdb_transaction_start_nonblock: int (struct tdb_context *)
tdb_transaction_unlock: int (struct tdb_context *, int)
tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_unlock: int (struct tdb_context *, int, int)
tdb_unlock_record: int (struct tdb_context *, tdb_off_t)
tdb_unlockall: int (struct tdb_context *)
tdb_unlockall_read: int (struct tdb_context *)
tdb_validate_freelist: int (struct tdb_context *, int *)
tdb_wipe_all: int (struct tdb_context *)
tdb_write_lock_record: int (struct tdb_context *, tdb_off_t)
tdb_write_unlock_record: int (struct tdb_context *, tdb_off_t)
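
Much of what tdb-1.2.1.sigs exports beyond the later files in this directory is internal machinery (tdb_allocate, tdb_rec_read, tdb_io_init, and friends) that subsequent releases stopped exporting; the stable surface is the open/store/fetch/close core. A minimal sketch of that core, assuming only <tdb.h> (file name and key are illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <tdb.h>

int main(void)
{
        /* hash_size 0 picks the default; TDB_DEFAULT means no special flags */
        struct tdb_context *tdb = tdb_open("demo.tdb", 0, TDB_DEFAULT,
                                           O_RDWR | O_CREAT, 0600);
        TDB_DATA key = { .dptr = (unsigned char *)"hello", .dsize = 5 };
        TDB_DATA val = { .dptr = (unsigned char *)"world", .dsize = 5 };
        TDB_DATA out;

        if (tdb == NULL) {
                return 1;
        }
        if (tdb_store(tdb, key, val, TDB_REPLACE) != 0) {
                fprintf(stderr, "store failed: %s\n", tdb_errorstr(tdb));
        }
        out = tdb_fetch(tdb, key);
        if (out.dptr != NULL) {
                printf("%.*s\n", (int)out.dsize, (const char *)out.dptr);
                free(out.dptr);         /* tdb_fetch hands back malloc'd memory */
        }
        return tdb_close(tdb);
}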

tdb-1.4.2/ABI/tdb-1.2.10.sigs:

tdb_add_flags: void (struct tdb_context *, unsigned int)
tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
tdb_chainlock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_close: int (struct tdb_context *)
tdb_delete: int (struct tdb_context *, TDB_DATA)
tdb_dump_all: void (struct tdb_context *)
tdb_enable_seqnum: void (struct tdb_context *)
tdb_error: enum TDB_ERROR (struct tdb_context *)
tdb_errorstr: const char *(struct tdb_context *)
tdb_exists: int (struct tdb_context *, TDB_DATA)
tdb_fd: int (struct tdb_context *)
tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_firstkey: TDB_DATA (struct tdb_context *)
tdb_freelist_size: int (struct tdb_context *)
tdb_get_flags: int (struct tdb_context *)
tdb_get_logging_private: void *(struct tdb_context *)
tdb_get_seqnum: int (struct tdb_context *)
tdb_hash_size: int (struct tdb_context *)
tdb_increment_seqnum_nonblock: void (struct tdb_context *)
tdb_jenkins_hash: unsigned int (TDB_DATA *)
tdb_lock_nonblock: int (struct tdb_context *, int, int)
tdb_lockall: int (struct tdb_context *)
tdb_lockall_mark: int (struct tdb_context *)
tdb_lockall_nonblock: int (struct tdb_context *)
tdb_lockall_read: int (struct tdb_context *)
tdb_lockall_read_nonblock: int (struct tdb_context *)
tdb_lockall_unmark: int (struct tdb_context *)
tdb_log_fn: tdb_log_func (struct tdb_context *)
tdb_map_size: size_t (struct tdb_context *)
tdb_name: const char *(struct tdb_context *)
tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_null: dptr = 0xXXXX, dsize = 0
tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_printfreelist: int (struct tdb_context *)
tdb_remove_flags: void (struct tdb_context *, unsigned int)
tdb_reopen: int (struct tdb_context *)
tdb_reopen_all: int (int)
tdb_repack: int (struct tdb_context *)
tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
tdb_set_max_dead: void (struct tdb_context *, int)
tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
tdb_summary: char *(struct tdb_context *)
tdb_transaction_cancel: int (struct tdb_context *)
tdb_transaction_commit: int (struct tdb_context *)
tdb_transaction_prepare_commit: int (struct tdb_context *)
tdb_transaction_start: int (struct tdb_context *)
tdb_transaction_start_nonblock: int (struct tdb_context *)
tdb_transaction_write_lock_mark: int (struct tdb_context *)
tdb_transaction_write_lock_unmark: int (struct tdb_context *)
tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_unlock: int (struct tdb_context *, int, int)
tdb_unlockall: int (struct tdb_context *)
tdb_unlockall_read: int (struct tdb_context *)
tdb_validate_freelist: int (struct tdb_context *, int *)
tdb_wipe_all: int (struct tdb_context *)
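
The tdb_transaction_* entries compose in the usual begin/commit/cancel pattern. A sketch that makes two writes land atomically or not at all (tdb is assumed to be an open context; the keys are illustrative):

#include <tdb.h>

static int update_pair(struct tdb_context *tdb, TDB_DATA k1, TDB_DATA v1,
                       TDB_DATA k2, TDB_DATA v2)
{
        if (tdb_transaction_start(tdb) != 0) {
                return -1;
        }
        if (tdb_store(tdb, k1, v1, TDB_REPLACE) != 0 ||
            tdb_store(tdb, k2, v2, TDB_REPLACE) != 0) {
                tdb_transaction_cancel(tdb);
                return -1;
        }
        /* tdb_transaction_prepare_commit() is for coordinating a two-phase
         * commit across several databases; a plain commit suffices here. */
        return tdb_transaction_commit(tdb);
}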

tdb-1.4.2/ABI/tdb-1.2.11.sigs:

tdb_add_flags: void (struct tdb_context *, unsigned int)
tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
tdb_chainlock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_close: int (struct tdb_context *)
tdb_delete: int (struct tdb_context *, TDB_DATA)
tdb_dump_all: void (struct tdb_context *)
tdb_enable_seqnum: void (struct tdb_context *)
tdb_error: enum TDB_ERROR (struct tdb_context *)
tdb_errorstr: const char *(struct tdb_context *)
tdb_exists: int (struct tdb_context *, TDB_DATA)
tdb_fd: int (struct tdb_context *)
tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_firstkey: TDB_DATA (struct tdb_context *)
tdb_freelist_size: int (struct tdb_context *)
tdb_get_flags: int (struct tdb_context *)
tdb_get_logging_private: void *(struct tdb_context *)
tdb_get_seqnum: int (struct tdb_context *)
tdb_hash_size: int (struct tdb_context *)
tdb_increment_seqnum_nonblock: void (struct tdb_context *)
tdb_jenkins_hash: unsigned int (TDB_DATA *)
tdb_lock_nonblock: int (struct tdb_context *, int, int)
tdb_lockall: int (struct tdb_context *)
tdb_lockall_mark: int (struct tdb_context *)
tdb_lockall_nonblock: int (struct tdb_context *)
tdb_lockall_read: int (struct tdb_context *)
tdb_lockall_read_nonblock: int (struct tdb_context *)
tdb_lockall_unmark: int (struct tdb_context *)
tdb_log_fn: tdb_log_func (struct tdb_context *)
tdb_map_size: size_t (struct tdb_context *)
tdb_name: const char *(struct tdb_context *)
tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_null: dptr = 0xXXXX, dsize = 0
tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_printfreelist: int (struct tdb_context *)
tdb_remove_flags: void (struct tdb_context *, unsigned int)
tdb_reopen: int (struct tdb_context *)
tdb_reopen_all: int (int)
tdb_repack: int (struct tdb_context *)
tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
tdb_set_max_dead: void (struct tdb_context *, int)
tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
tdb_summary: char *(struct tdb_context *)
tdb_transaction_cancel: int (struct tdb_context *)
tdb_transaction_commit: int (struct tdb_context *)
tdb_transaction_prepare_commit: int (struct tdb_context *)
tdb_transaction_start: int (struct tdb_context *)
tdb_transaction_start_nonblock: int (struct tdb_context *)
tdb_transaction_write_lock_mark: int (struct tdb_context *)
tdb_transaction_write_lock_unmark: int (struct tdb_context *)
tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_unlock: int (struct tdb_context *, int, int)
tdb_unlockall: int (struct tdb_context *)
tdb_unlockall_read: int (struct tdb_context *)
tdb_validate_freelist: int (struct tdb_context *, int *)
tdb_wipe_all: int (struct tdb_context *)
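
tdb_rescue first appears in this file. Going by its signature, the callback receives each record that can still be salvaged from a damaged database; copying into a fresh database, as below, is one plausible use. The key-before-data argument order is an assumption:

#include <tdb.h>

static void salvage(TDB_DATA key, TDB_DATA data, void *private_data)
{
        struct tdb_context *backup = private_data;

        /* Copy each record tdb_rescue manages to recover. */
        tdb_store(backup, key, data, TDB_INSERT);
}

static int rescue_into(struct tdb_context *damaged, struct tdb_context *backup)
{
        return tdb_rescue(damaged, salvage, backup);
}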

tdb-1.4.2/ABI/tdb-1.2.12.sigs:

tdb_add_flags: void (struct tdb_context *, unsigned int)
tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
tdb_chainlock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_close: int (struct tdb_context *)
tdb_delete: int (struct tdb_context *, TDB_DATA)
tdb_dump_all: void (struct tdb_context *)
tdb_enable_seqnum: void (struct tdb_context *)
tdb_error: enum TDB_ERROR (struct tdb_context *)
tdb_errorstr: const char *(struct tdb_context *)
tdb_exists: int (struct tdb_context *, TDB_DATA)
tdb_fd: int (struct tdb_context *)
tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_firstkey: TDB_DATA (struct tdb_context *)
tdb_freelist_size: int (struct tdb_context *)
tdb_get_flags: int (struct tdb_context *)
tdb_get_logging_private: void *(struct tdb_context *)
tdb_get_seqnum: int (struct tdb_context *)
tdb_hash_size: int (struct tdb_context *)
tdb_increment_seqnum_nonblock: void (struct tdb_context *)
tdb_jenkins_hash: unsigned int (TDB_DATA *)
tdb_lock_nonblock: int (struct tdb_context *, int, int)
tdb_lockall: int (struct tdb_context *)
tdb_lockall_mark: int (struct tdb_context *)
tdb_lockall_nonblock: int (struct tdb_context *)
tdb_lockall_read: int (struct tdb_context *)
tdb_lockall_read_nonblock: int (struct tdb_context *)
tdb_lockall_unmark: int (struct tdb_context *)
tdb_log_fn: tdb_log_func (struct tdb_context *)
tdb_map_size: size_t (struct tdb_context *)
tdb_name: const char *(struct tdb_context *)
tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_null: dptr = 0xXXXX, dsize = 0
tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_printfreelist: int (struct tdb_context *)
tdb_remove_flags: void (struct tdb_context *, unsigned int)
tdb_reopen: int (struct tdb_context *)
tdb_reopen_all: int (int)
tdb_repack: int (struct tdb_context *)
tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
tdb_set_max_dead: void (struct tdb_context *, int)
tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
tdb_summary: char *(struct tdb_context *)
tdb_transaction_cancel: int (struct tdb_context *)
tdb_transaction_commit: int (struct tdb_context *)
tdb_transaction_prepare_commit: int (struct tdb_context *)
tdb_transaction_start: int (struct tdb_context *)
tdb_transaction_start_nonblock: int (struct tdb_context *)
tdb_transaction_write_lock_mark: int (struct tdb_context *)
tdb_transaction_write_lock_unmark: int (struct tdb_context *)
tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_unlock: int (struct tdb_context *, int, int)
tdb_unlockall: int (struct tdb_context *)
tdb_unlockall_read: int (struct tdb_context *)
tdb_validate_freelist: int (struct tdb_context *, int *)
tdb_wipe_all: int (struct tdb_context *)
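
tdb_parse_record is the zero-copy counterpart to tdb_fetch: the callback sees the record data in place (valid only for the duration of the call), and its return value becomes the function's result. A sketch, assuming a record that stores a raw uint64_t:

#include <stdint.h>
#include <string.h>
#include <tdb.h>

static int parse_counter(TDB_DATA key, TDB_DATA data, void *private_data)
{
        uint64_t *value = private_data;

        if (data.dsize != sizeof(*value)) {
                return -1;      /* reject records of the wrong shape */
        }
        memcpy(value, data.dptr, sizeof(*value));
        return 0;               /* handed back as tdb_parse_record's result */
}

/* usage: uint64_t v; tdb_parse_record(tdb, key, parse_counter, &v); */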

tdb-1.4.2/ABI/tdb-1.2.13.sigs:

tdb_add_flags: void (struct tdb_context *, unsigned int)
tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
tdb_chainlock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_close: int (struct tdb_context *)
tdb_delete: int (struct tdb_context *, TDB_DATA)
tdb_dump_all: void (struct tdb_context *)
tdb_enable_seqnum: void (struct tdb_context *)
tdb_error: enum TDB_ERROR (struct tdb_context *)
tdb_errorstr: const char *(struct tdb_context *)
tdb_exists: int (struct tdb_context *, TDB_DATA)
tdb_fd: int (struct tdb_context *)
tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_firstkey: TDB_DATA (struct tdb_context *)
tdb_freelist_size: int (struct tdb_context *)
tdb_get_flags: int (struct tdb_context *)
tdb_get_logging_private: void *(struct tdb_context *)
tdb_get_seqnum: int (struct tdb_context *)
tdb_hash_size: int (struct tdb_context *)
tdb_increment_seqnum_nonblock: void (struct tdb_context *)
tdb_jenkins_hash: unsigned int (TDB_DATA *)
tdb_lock_nonblock: int (struct tdb_context *, int, int)
tdb_lockall: int (struct tdb_context *)
tdb_lockall_mark: int (struct tdb_context *)
tdb_lockall_nonblock: int (struct tdb_context *)
tdb_lockall_read: int (struct tdb_context *)
tdb_lockall_read_nonblock: int (struct tdb_context *)
tdb_lockall_unmark: int (struct tdb_context *)
tdb_log_fn: tdb_log_func (struct tdb_context *)
tdb_map_size: size_t (struct tdb_context *)
tdb_name: const char *(struct tdb_context *)
tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_null: dptr = 0xXXXX, dsize = 0
tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_printfreelist: int (struct tdb_context *)
tdb_remove_flags: void (struct tdb_context *, unsigned int)
tdb_reopen: int (struct tdb_context *)
tdb_reopen_all: int (int)
tdb_repack: int (struct tdb_context *)
tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
tdb_set_max_dead: void (struct tdb_context *, int)
tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
tdb_summary: char *(struct tdb_context *)
tdb_transaction_cancel: int (struct tdb_context *)
tdb_transaction_commit: int (struct tdb_context *)
tdb_transaction_prepare_commit: int (struct tdb_context *)
tdb_transaction_start: int (struct tdb_context *)
tdb_transaction_start_nonblock: int (struct tdb_context *)
tdb_transaction_write_lock_mark: int (struct tdb_context *)
tdb_transaction_write_lock_unmark: int (struct tdb_context *)
tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_unlock: int (struct tdb_context *, int, int)
tdb_unlockall: int (struct tdb_context *)
tdb_unlockall_read: int (struct tdb_context *)
tdb_validate_freelist: int (struct tdb_context *, int *)
tdb_wipe_all: int (struct tdb_context *)

tdb-1.4.2/ABI/tdb-1.2.2.sigs:

tdb_add_flags: void (struct tdb_context *, unsigned int)
tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
tdb_chainlock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_close: int (struct tdb_context *)
tdb_delete: int (struct tdb_context *, TDB_DATA)
tdb_dump_all: void (struct tdb_context *)
tdb_enable_seqnum: void (struct tdb_context *)
tdb_error: enum TDB_ERROR (struct tdb_context *)
tdb_errorstr: const char *(struct tdb_context *)
tdb_exists: int (struct tdb_context *, TDB_DATA)
tdb_fd: int (struct tdb_context *)
tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_firstkey: TDB_DATA (struct tdb_context *)
tdb_freelist_size: int (struct tdb_context *)
tdb_get_flags: int (struct tdb_context *)
tdb_get_logging_private: void *(struct tdb_context *)
tdb_get_seqnum: int (struct tdb_context *)
tdb_hash_size: int (struct tdb_context *)
tdb_increment_seqnum_nonblock: void (struct tdb_context *)
tdb_lockall: int (struct tdb_context *)
tdb_lockall_mark: int (struct tdb_context *)
tdb_lockall_nonblock: int (struct tdb_context *)
tdb_lockall_read: int (struct tdb_context *)
tdb_lockall_read_nonblock: int (struct tdb_context *)
tdb_lockall_unmark: int (struct tdb_context *)
tdb_log_fn: tdb_log_func (struct tdb_context *)
tdb_map_size: size_t (struct tdb_context *)
tdb_name: const char *(struct tdb_context *)
tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_null: dptr = 0xXXXX, dsize = 0
tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_printfreelist: int (struct tdb_context *)
tdb_remove_flags: void (struct tdb_context *, unsigned int)
tdb_reopen: int (struct tdb_context *)
tdb_reopen_all: int (int)
tdb_repack: int (struct tdb_context *)
tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
tdb_set_max_dead: void (struct tdb_context *, int)
tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
tdb_transaction_cancel: int (struct tdb_context *)
tdb_transaction_commit: int (struct tdb_context *)
tdb_transaction_prepare_commit: int (struct tdb_context *)
tdb_transaction_start: int (struct tdb_context *)
tdb_transaction_start_nonblock: int (struct tdb_context *)
tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_unlockall: int (struct tdb_context *)
tdb_unlockall_read: int (struct tdb_context *)
tdb_validate_freelist: int (struct tdb_context *, int *)
tdb_wipe_all: int (struct tdb_context *)
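
The dump leaves tdb_traverse_func unexpanded; in tdb.h it is a callback taking the context, key, data, and private pointer, where a non-zero return stops the walk (that typedef is recalled from the header, not recorded in this file). A sketch that counts records:

#include <tdb.h>

static int count_one(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
                     void *private_data)
{
        unsigned int *count = private_data;

        (*count)++;
        return 0;               /* non-zero would stop the traversal early */
}

static unsigned int count_records(struct tdb_context *tdb)
{
        unsigned int count = 0;

        /* the _read variant takes read locks only, so writers keep running */
        tdb_traverse_read(tdb, count_one, &count);
        return count;
}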

tdb-1.4.2/ABI/tdb-1.2.3.sigs:

tdb_add_flags: void (struct tdb_context *, unsigned int)
tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
tdb_chainlock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_close: int (struct tdb_context *)
tdb_delete: int (struct tdb_context *, TDB_DATA)
tdb_dump_all: void (struct tdb_context *)
tdb_enable_seqnum: void (struct tdb_context *)
tdb_error: enum TDB_ERROR (struct tdb_context *)
tdb_errorstr: const char *(struct tdb_context *)
tdb_exists: int (struct tdb_context *, TDB_DATA)
tdb_fd: int (struct tdb_context *)
tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_firstkey: TDB_DATA (struct tdb_context *)
tdb_freelist_size: int (struct tdb_context *)
tdb_get_flags: int (struct tdb_context *)
tdb_get_logging_private: void *(struct tdb_context *)
tdb_get_seqnum: int (struct tdb_context *)
tdb_hash_size: int (struct tdb_context *)
tdb_increment_seqnum_nonblock: void (struct tdb_context *)
tdb_lockall: int (struct tdb_context *)
tdb_lockall_mark: int (struct tdb_context *)
tdb_lockall_nonblock: int (struct tdb_context *)
tdb_lockall_read: int (struct tdb_context *)
tdb_lockall_read_nonblock: int (struct tdb_context *)
tdb_lockall_unmark: int (struct tdb_context *)
tdb_log_fn: tdb_log_func (struct tdb_context *)
tdb_map_size: size_t (struct tdb_context *)
tdb_name: const char *(struct tdb_context *)
tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_null: dptr = 0xXXXX, dsize = 0
tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_printfreelist: int (struct tdb_context *)
tdb_remove_flags: void (struct tdb_context *, unsigned int)
tdb_reopen: int (struct tdb_context *)
tdb_reopen_all: int (int)
tdb_repack: int (struct tdb_context *)
tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
tdb_set_max_dead: void (struct tdb_context *, int)
tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
tdb_transaction_cancel: int (struct tdb_context *)
tdb_transaction_commit: int (struct tdb_context *)
tdb_transaction_prepare_commit: int (struct tdb_context *)
tdb_transaction_start: int (struct tdb_context *)
tdb_transaction_start_nonblock: int (struct tdb_context *)
tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_unlockall: int (struct tdb_context *)
tdb_unlockall_read: int (struct tdb_context *)
tdb_validate_freelist: int (struct tdb_context *, int *)
tdb_wipe_all: int (struct tdb_context *)
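
tdb_open_ex extends tdb_open with a logging context and a hash-function override. The log_fn/log_private members and the tdb_log_func argument list below follow the tdb.h declarations and are assumptions as far as this dump goes; passing a NULL hash function selects the built-in default:

#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <tdb.h>

static void log_to_stderr(struct tdb_context *tdb, enum tdb_debug_level level,
                          const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
}

static struct tdb_context *open_logged(const char *path)
{
        struct tdb_logging_context log_ctx = {
                .log_fn = log_to_stderr,
                .log_private = NULL,
        };

        return tdb_open_ex(path, 0, TDB_DEFAULT, O_RDWR | O_CREAT, 0600,
                           &log_ctx, NULL /* default hash */);
}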

tdb-1.4.2/ABI/tdb-1.2.4.sigs:

tdb_add_flags: void (struct tdb_context *, unsigned int)
tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
tdb_chainlock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_close: int (struct tdb_context *)
tdb_delete: int (struct tdb_context *, TDB_DATA)
tdb_dump_all: void (struct tdb_context *)
tdb_enable_seqnum: void (struct tdb_context *)
tdb_error: enum TDB_ERROR (struct tdb_context *)
tdb_errorstr: const char *(struct tdb_context *)
tdb_exists: int (struct tdb_context *, TDB_DATA)
tdb_fd: int (struct tdb_context *)
tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_firstkey: TDB_DATA (struct tdb_context *)
tdb_freelist_size: int (struct tdb_context *)
tdb_get_flags: int (struct tdb_context *)
tdb_get_logging_private: void *(struct tdb_context *)
tdb_get_seqnum: int (struct tdb_context *)
tdb_hash_size: int (struct tdb_context *)
tdb_increment_seqnum_nonblock: void (struct tdb_context *)
tdb_lockall: int (struct tdb_context *)
tdb_lockall_mark: int (struct tdb_context *)
tdb_lockall_nonblock: int (struct tdb_context *)
tdb_lockall_read: int (struct tdb_context *)
tdb_lockall_read_nonblock: int (struct tdb_context *)
tdb_lockall_unmark: int (struct tdb_context *)
tdb_log_fn: tdb_log_func (struct tdb_context *)
tdb_map_size: size_t (struct tdb_context *)
tdb_name: const char *(struct tdb_context *)
tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_null: dptr = 0xXXXX, dsize = 0
tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_printfreelist: int (struct tdb_context *)
tdb_remove_flags: void (struct tdb_context *, unsigned int)
tdb_reopen: int (struct tdb_context *)
tdb_reopen_all: int (int)
tdb_repack: int (struct tdb_context *)
tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
tdb_set_max_dead: void (struct tdb_context *, int)
tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
tdb_transaction_cancel: int (struct tdb_context *)
tdb_transaction_commit: int (struct tdb_context *)
tdb_transaction_prepare_commit: int (struct tdb_context *)
tdb_transaction_start: int (struct tdb_context *)
tdb_transaction_start_nonblock: int (struct tdb_context *)
tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_unlockall: int (struct tdb_context *)
tdb_unlockall_read: int (struct tdb_context *)
tdb_validate_freelist: int (struct tdb_context *, int *)
tdb_wipe_all: int (struct tdb_context *)

tdb-1.4.2/ABI/tdb-1.2.5.sigs:

tdb_add_flags: void (struct tdb_context *, unsigned int)
tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
tdb_chainlock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_close: int (struct tdb_context *)
tdb_delete: int (struct tdb_context *, TDB_DATA)
tdb_dump_all: void (struct tdb_context *)
tdb_enable_seqnum: void (struct tdb_context *)
tdb_error: enum TDB_ERROR (struct tdb_context *)
tdb_errorstr: const char *(struct tdb_context *)
tdb_exists: int (struct tdb_context *, TDB_DATA)
tdb_fd: int (struct tdb_context *)
tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_firstkey: TDB_DATA (struct tdb_context *)
tdb_freelist_size: int (struct tdb_context *)
tdb_get_flags: int (struct tdb_context *)
tdb_get_logging_private: void *(struct tdb_context *)
tdb_get_seqnum: int (struct tdb_context *)
tdb_hash_size: int (struct tdb_context *)
tdb_increment_seqnum_nonblock: void (struct tdb_context *)
tdb_jenkins_hash: unsigned int (TDB_DATA *)
tdb_lockall: int (struct tdb_context *)
tdb_lockall_mark: int (struct tdb_context *)
tdb_lockall_nonblock: int (struct tdb_context *)
tdb_lockall_read: int (struct tdb_context *)
tdb_lockall_read_nonblock: int (struct tdb_context *)
tdb_lockall_unmark: int (struct tdb_context *)
tdb_log_fn: tdb_log_func (struct tdb_context *)
tdb_map_size: size_t (struct tdb_context *)
tdb_name: const char *(struct tdb_context *)
tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_null: dptr = 0xXXXX, dsize = 0
tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_printfreelist: int (struct tdb_context *)
tdb_remove_flags: void (struct tdb_context *, unsigned int)
tdb_reopen: int (struct tdb_context *)
tdb_reopen_all: int (int)
tdb_repack: int (struct tdb_context *)
tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
tdb_set_max_dead: void (struct tdb_context *, int)
tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
tdb_transaction_cancel: int (struct tdb_context *)
tdb_transaction_commit: int (struct tdb_context *)
tdb_transaction_prepare_commit: int (struct tdb_context *)
tdb_transaction_start: int (struct tdb_context *)
tdb_transaction_start_nonblock: int (struct tdb_context *)
tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_unlockall: int (struct tdb_context *)
tdb_unlockall_read: int (struct tdb_context *)
tdb_validate_freelist: int (struct tdb_context *, int *)
tdb_wipe_all: int (struct tdb_context *)
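
The chainlock family locks the single hash chain a key lives on, which is the cheap way to make a cross-process read-modify-write atomic. A sketch of a counter increment (the 8-byte host-endian record layout is an illustrative convention):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <tdb.h>

static int counter_increment(struct tdb_context *tdb, TDB_DATA key)
{
        uint64_t value = 0;
        TDB_DATA old, val;
        int ret;

        if (tdb_chainlock(tdb, key) != 0) {
                return -1;
        }
        old = tdb_fetch(tdb, key);
        if (old.dptr != NULL) {
                if (old.dsize == sizeof(value)) {
                        memcpy(&value, old.dptr, sizeof(value));
                }
                free(old.dptr);
        }
        value++;
        val.dptr = (unsigned char *)&value;
        val.dsize = sizeof(value);
        ret = tdb_store(tdb, key, val, TDB_REPLACE);
        tdb_chainunlock(tdb, key);
        return ret;
}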

tdb-1.4.2/ABI/tdb-1.2.6.sigs:

tdb_add_flags: void (struct tdb_context *, unsigned int)
tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
tdb_chainlock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_close: int (struct tdb_context *)
tdb_delete: int (struct tdb_context *, TDB_DATA)
tdb_dump_all: void (struct tdb_context *)
tdb_enable_seqnum: void (struct tdb_context *)
tdb_error: enum TDB_ERROR (struct tdb_context *)
tdb_errorstr: const char *(struct tdb_context *)
tdb_exists: int (struct tdb_context *, TDB_DATA)
tdb_fd: int (struct tdb_context *)
tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_firstkey: TDB_DATA (struct tdb_context *)
tdb_freelist_size: int (struct tdb_context *)
tdb_get_flags: int (struct tdb_context *)
tdb_get_logging_private: void *(struct tdb_context *)
tdb_get_seqnum: int (struct tdb_context *)
tdb_hash_size: int (struct tdb_context *)
tdb_increment_seqnum_nonblock: void (struct tdb_context *)
tdb_jenkins_hash: unsigned int (TDB_DATA *)
tdb_lockall: int (struct tdb_context *)
tdb_lockall_mark: int (struct tdb_context *)
tdb_lockall_nonblock: int (struct tdb_context *)
tdb_lockall_read: int (struct tdb_context *)
tdb_lockall_read_nonblock: int (struct tdb_context *)
tdb_lockall_unmark: int (struct tdb_context *)
tdb_log_fn: tdb_log_func (struct tdb_context *)
tdb_map_size: size_t (struct tdb_context *)
tdb_name: const char *(struct tdb_context *)
tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_null: dptr = 0xXXXX, dsize = 0
tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_printfreelist: int (struct tdb_context *)
tdb_remove_flags: void (struct tdb_context *, unsigned int)
tdb_reopen: int (struct tdb_context *)
tdb_reopen_all: int (int)
tdb_repack: int (struct tdb_context *)
tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
tdb_set_max_dead: void (struct tdb_context *, int)
tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
tdb_transaction_cancel: int (struct tdb_context *)
tdb_transaction_commit: int (struct tdb_context *)
tdb_transaction_prepare_commit: int (struct tdb_context *)
tdb_transaction_start: int (struct tdb_context *)
tdb_transaction_start_nonblock: int (struct tdb_context *)
tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_unlockall: int (struct tdb_context *)
tdb_unlockall_read: int (struct tdb_context *)
tdb_validate_freelist: int (struct tdb_context *, int *)
tdb_wipe_all: int (struct tdb_context *)

tdb-1.4.2/ABI/tdb-1.2.7.sigs:

tdb_add_flags: void (struct tdb_context *, unsigned int)
tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
tdb_chainlock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_close: int (struct tdb_context *)
tdb_delete: int (struct tdb_context *, TDB_DATA)
tdb_dump_all: void (struct tdb_context *)
tdb_enable_seqnum: void (struct tdb_context *)
tdb_error: enum TDB_ERROR (struct tdb_context *)
tdb_errorstr: const char *(struct tdb_context *)
tdb_exists: int (struct tdb_context *, TDB_DATA)
tdb_fd: int (struct tdb_context *)
tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_firstkey: TDB_DATA (struct tdb_context *)
tdb_freelist_size: int (struct tdb_context *)
tdb_get_flags: int (struct tdb_context *)
tdb_get_logging_private: void *(struct tdb_context *)
tdb_get_seqnum: int (struct tdb_context *)
tdb_hash_size: int (struct tdb_context *)
tdb_increment_seqnum_nonblock: void (struct tdb_context *)
tdb_jenkins_hash: unsigned int (TDB_DATA *)
tdb_lockall: int (struct tdb_context *)
tdb_lockall_mark: int (struct tdb_context *)
tdb_lockall_nonblock: int (struct tdb_context *)
tdb_lockall_read: int (struct tdb_context *)
tdb_lockall_read_nonblock: int (struct tdb_context *)
tdb_lockall_unmark: int (struct tdb_context *)
tdb_log_fn: tdb_log_func (struct tdb_context *)
tdb_map_size: size_t (struct tdb_context *)
tdb_name: const char *(struct tdb_context *)
tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_null: dptr = 0xXXXX, dsize = 0
tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_printfreelist: int (struct tdb_context *)
tdb_remove_flags: void (struct tdb_context *, unsigned int)
tdb_reopen: int (struct tdb_context *)
tdb_reopen_all: int (int)
tdb_repack: int (struct tdb_context *)
tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
tdb_set_max_dead: void (struct tdb_context *, int)
tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
tdb_transaction_cancel: int (struct tdb_context *)
tdb_transaction_commit: int (struct tdb_context *)
tdb_transaction_prepare_commit: int (struct tdb_context *)
tdb_transaction_start: int (struct tdb_context *)
tdb_transaction_start_nonblock: int (struct tdb_context *)
tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_unlockall: int (struct tdb_context *)
tdb_unlockall_read: int (struct tdb_context *)
tdb_validate_freelist: int (struct tdb_context *, int *)
tdb_wipe_all: int (struct tdb_context *)

tdb-1.4.2/ABI/tdb-1.2.8.sigs:

tdb_add_flags: void (struct tdb_context *, unsigned int)
tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
tdb_chainlock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_close: int (struct tdb_context *)
tdb_delete: int (struct tdb_context *, TDB_DATA)
tdb_dump_all: void (struct tdb_context *)
tdb_enable_seqnum: void (struct tdb_context *)
tdb_error: enum TDB_ERROR (struct tdb_context *)
tdb_errorstr: const char *(struct tdb_context *)
tdb_exists: int (struct tdb_context *, TDB_DATA)
tdb_fd: int (struct tdb_context *)
tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_firstkey: TDB_DATA (struct tdb_context *)
tdb_freelist_size: int (struct tdb_context *)
tdb_get_flags: int (struct tdb_context *)
tdb_get_logging_private: void *(struct tdb_context *)
tdb_get_seqnum: int (struct tdb_context *)
tdb_hash_size: int (struct tdb_context *)
tdb_increment_seqnum_nonblock: void (struct tdb_context *)
tdb_jenkins_hash: unsigned int (TDB_DATA *)
tdb_lockall: int (struct tdb_context *)
tdb_lockall_mark: int (struct tdb_context *)
tdb_lockall_nonblock: int (struct tdb_context *)
tdb_lockall_read: int (struct tdb_context *)
tdb_lockall_read_nonblock: int (struct tdb_context *)
tdb_lockall_unmark: int (struct tdb_context *)
tdb_log_fn: tdb_log_func (struct tdb_context *)
tdb_map_size: size_t (struct tdb_context *)
tdb_name: const char *(struct tdb_context *)
tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_null: dptr = 0xXXXX, dsize = 0
tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_printfreelist: int (struct tdb_context *)
tdb_remove_flags: void (struct tdb_context *, unsigned int)
tdb_reopen: int (struct tdb_context *)
tdb_reopen_all: int (int)
tdb_repack: int (struct tdb_context *)
tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
tdb_set_max_dead: void (struct tdb_context *, int)
tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
tdb_transaction_cancel: int (struct tdb_context *)
tdb_transaction_commit: int (struct tdb_context *)
tdb_transaction_prepare_commit: int (struct tdb_context *)
tdb_transaction_start: int (struct tdb_context *)
tdb_transaction_start_nonblock: int (struct tdb_context *)
tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_unlockall: int (struct tdb_context *)
tdb_unlockall_read: int (struct tdb_context *)
tdb_validate_freelist: int (struct tdb_context *, int *)
tdb_wipe_all: int (struct tdb_context *)
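
tdb_check walks the whole file verifying its structure; the optional callback also sees every key/value pair, so application-level invariants can ride along in the same pass. A sketch (the 0-on-success convention for the callback matches the rest of the API but is an assumption here):

#include <tdb.h>

static int check_record(TDB_DATA key, TDB_DATA data, void *private_data)
{
        /* Flag records this application considers malformed. */
        return (key.dsize == 0) ? -1 : 0;
}

static int verify(struct tdb_context *tdb)
{
        /* A NULL callback still checks the low-level file structure. */
        return tdb_check(tdb, check_record, NULL);
}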

tdb-1.4.2/ABI/tdb-1.2.9.sigs:

tdb_add_flags: void (struct tdb_context *, unsigned int)
tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
tdb_chainlock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_close: int (struct tdb_context *)
tdb_delete: int (struct tdb_context *, TDB_DATA)
tdb_dump_all: void (struct tdb_context *)
tdb_enable_seqnum: void (struct tdb_context *)
tdb_error: enum TDB_ERROR (struct tdb_context *)
tdb_errorstr: const char *(struct tdb_context *)
tdb_exists: int (struct tdb_context *, TDB_DATA)
tdb_fd: int (struct tdb_context *)
tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_firstkey: TDB_DATA (struct tdb_context *)
tdb_freelist_size: int (struct tdb_context *)
tdb_get_flags: int (struct tdb_context *)
tdb_get_logging_private: void *(struct tdb_context *)
tdb_get_seqnum: int (struct tdb_context *)
tdb_hash_size: int (struct tdb_context *)
tdb_increment_seqnum_nonblock: void (struct tdb_context *)
tdb_jenkins_hash: unsigned int (TDB_DATA *)
tdb_lockall: int (struct tdb_context *)
tdb_lockall_mark: int (struct tdb_context *)
tdb_lockall_nonblock: int (struct tdb_context *)
tdb_lockall_read: int (struct tdb_context *)
tdb_lockall_read_nonblock: int (struct tdb_context *)
tdb_lockall_unmark: int (struct tdb_context *)
tdb_log_fn: tdb_log_func (struct tdb_context *)
tdb_map_size: size_t (struct tdb_context *)
tdb_name: const char *(struct tdb_context *)
tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_null: dptr = 0xXXXX, dsize = 0
tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_printfreelist: int (struct tdb_context *)
tdb_remove_flags: void (struct tdb_context *, unsigned int)
tdb_reopen: int (struct tdb_context *)
tdb_reopen_all: int (int)
tdb_repack: int (struct tdb_context *)
tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
tdb_set_max_dead: void (struct tdb_context *, int)
tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
tdb_summary: char *(struct tdb_context *)
tdb_transaction_cancel: int (struct tdb_context *)
tdb_transaction_commit: int (struct tdb_context *)
tdb_transaction_prepare_commit: int (struct tdb_context *)
tdb_transaction_start: int (struct tdb_context *)
tdb_transaction_start_nonblock: int (struct tdb_context *)
tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_unlockall: int (struct tdb_context *)
tdb_unlockall_read: int (struct tdb_context *)
tdb_validate_freelist: int (struct tdb_context *, int *)
tdb_wipe_all: int (struct tdb_context *)
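
tdb_summary first appears in this file; it returns a freshly allocated human-readable report on the database, so freeing the string is the caller's job:

#include <stdio.h>
#include <stdlib.h>
#include <tdb.h>

static void print_summary(struct tdb_context *tdb)
{
        char *report = tdb_summary(tdb);

        if (report != NULL) {
                fputs(report, stdout);
                free(report);
        }
}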

tdb-1.4.2/ABI/tdb-1.3.0.sigs:

tdb_add_flags: void (struct tdb_context *, unsigned int)
tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
tdb_chainlock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_close: int (struct tdb_context *)
tdb_delete: int (struct tdb_context *, TDB_DATA)
tdb_dump_all: void (struct tdb_context *)
tdb_enable_seqnum: void (struct tdb_context *)
tdb_error: enum TDB_ERROR (struct tdb_context *)
tdb_errorstr: const char *(struct tdb_context *)
tdb_exists: int (struct tdb_context *, TDB_DATA)
tdb_fd: int (struct tdb_context *)
tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_firstkey: TDB_DATA (struct tdb_context *)
tdb_freelist_size: int (struct tdb_context *)
tdb_get_flags: int (struct tdb_context *)
tdb_get_logging_private: void *(struct tdb_context *)
tdb_get_seqnum: int (struct tdb_context *)
tdb_hash_size: int (struct tdb_context *)
tdb_increment_seqnum_nonblock: void (struct tdb_context *)
tdb_jenkins_hash: unsigned int (TDB_DATA *)
tdb_lock_nonblock: int (struct tdb_context *, int, int)
tdb_lockall: int (struct tdb_context *)
tdb_lockall_mark: int (struct tdb_context *)
tdb_lockall_nonblock: int (struct tdb_context *)
tdb_lockall_read: int (struct tdb_context *)
tdb_lockall_read_nonblock: int (struct tdb_context *)
tdb_lockall_unmark: int (struct tdb_context *)
tdb_log_fn: tdb_log_func (struct tdb_context *)
tdb_map_size: size_t (struct tdb_context *)
tdb_name: const char *(struct tdb_context *)
tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_null: dptr = 0xXXXX, dsize = 0
tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_printfreelist: int (struct tdb_context *)
tdb_remove_flags: void (struct tdb_context *, unsigned int)
tdb_reopen: int (struct tdb_context *)
tdb_reopen_all: int (int)
tdb_repack: int (struct tdb_context *)
tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_runtime_check_for_robust_mutexes: bool (void)
tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
tdb_set_max_dead: void (struct tdb_context *, int)
tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
tdb_summary: char *(struct tdb_context *)
tdb_transaction_cancel: int (struct tdb_context *)
tdb_transaction_commit: int (struct tdb_context *)
tdb_transaction_prepare_commit: int (struct tdb_context *)
tdb_transaction_start: int (struct tdb_context *)
tdb_transaction_start_nonblock: int (struct tdb_context *)
tdb_transaction_write_lock_mark: int (struct tdb_context *)
tdb_transaction_write_lock_unmark: int (struct tdb_context *)
tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_unlock: int (struct tdb_context *, int, int)
tdb_unlockall: int (struct tdb_context *)
tdb_unlockall_read: int (struct tdb_context *)
tdb_validate_freelist: int (struct tdb_context *, int *)
tdb_wipe_all: int (struct tdb_context *)
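
tdb_runtime_check_for_robust_mutexes first appears here, alongside the 1.3.0 mutex-locking work: it probes at runtime whether the system's shared robust mutexes actually work. The TDB_MUTEX_LOCKING flag, and the constraint that it be paired with TDB_CLEAR_IF_FIRST, come from tdb.h of the same era and are assumptions as far as this dump goes:

#include <fcntl.h>
#include <tdb.h>

static struct tdb_context *open_with_fast_locks(const char *path)
{
        int tdb_flags = TDB_CLEAR_IF_FIRST;

        /* Opt in to mutex locking only when the runtime supports it;
         * otherwise tdb falls back to fcntl() byte-range locks. */
        if (tdb_runtime_check_for_robust_mutexes()) {
                tdb_flags |= TDB_MUTEX_LOCKING;
        }
        return tdb_open(path, 0, tdb_flags, O_RDWR | O_CREAT, 0600);
}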

tdb-1.4.2/ABI/tdb-1.3.1.sigs:

tdb_add_flags: void (struct tdb_context *, unsigned int)
tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
tdb_chainlock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_close: int (struct tdb_context *)
tdb_delete: int (struct tdb_context *, TDB_DATA)
tdb_dump_all: void (struct tdb_context *)
tdb_enable_seqnum: void (struct tdb_context *)
tdb_error: enum TDB_ERROR (struct tdb_context *)
tdb_errorstr: const char *(struct tdb_context *)
tdb_exists: int (struct tdb_context *, TDB_DATA)
tdb_fd: int (struct tdb_context *)
tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_firstkey: TDB_DATA (struct tdb_context *)
tdb_freelist_size: int (struct tdb_context *)
tdb_get_flags: int (struct tdb_context *)
tdb_get_logging_private: void *(struct tdb_context *)
tdb_get_seqnum: int (struct tdb_context *)
tdb_hash_size: int (struct tdb_context *)
tdb_increment_seqnum_nonblock: void (struct tdb_context *)
tdb_jenkins_hash: unsigned int (TDB_DATA *)
tdb_lock_nonblock: int (struct tdb_context *, int, int)
tdb_lockall: int (struct tdb_context *)
tdb_lockall_mark: int (struct tdb_context *)
tdb_lockall_nonblock: int (struct tdb_context *)
tdb_lockall_read: int (struct tdb_context *)
tdb_lockall_read_nonblock: int (struct tdb_context *)
tdb_lockall_unmark: int (struct tdb_context *)
tdb_log_fn: tdb_log_func (struct tdb_context *)
tdb_map_size: size_t (struct tdb_context *)
tdb_name: const char *(struct tdb_context *)
tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_null: dptr = 0xXXXX, dsize = 0
tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_printfreelist: int (struct tdb_context *)
tdb_remove_flags: void (struct tdb_context *, unsigned int)
tdb_reopen: int (struct tdb_context *)
tdb_reopen_all: int (int)
tdb_repack: int (struct tdb_context *)
tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_runtime_check_for_robust_mutexes: bool (void)
tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
tdb_set_max_dead: void (struct tdb_context *, int)
tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
tdb_summary: char *(struct tdb_context *)
tdb_transaction_cancel: int (struct tdb_context *)
tdb_transaction_commit: int (struct tdb_context *)
tdb_transaction_prepare_commit: int (struct tdb_context *)
tdb_transaction_start: int (struct tdb_context *)
tdb_transaction_start_nonblock: int (struct tdb_context *)
tdb_transaction_write_lock_mark: int (struct tdb_context *)
tdb_transaction_write_lock_unmark: int (struct tdb_context *)
tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_unlock: int (struct tdb_context *, int, int)
tdb_unlockall: int (struct tdb_context *)
tdb_unlockall_read: int (struct tdb_context *)
tdb_validate_freelist: int (struct tdb_context *, int *)
tdb_wipe_all: int (struct tdb_context *)

tdb-1.4.2/ABI/tdb-1.3.10.sigs:

tdb_add_flags: void (struct tdb_context *, unsigned int)
tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
tdb_chainlock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_read_nonblock: int (struct tdb_context *, TDB_DATA)
tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_close: int (struct tdb_context *)
tdb_delete: int (struct tdb_context *, TDB_DATA)
tdb_dump_all: void (struct tdb_context *)
tdb_enable_seqnum: void (struct tdb_context *)
tdb_error: enum TDB_ERROR (struct tdb_context *)
tdb_errorstr: const char *(struct tdb_context *)
tdb_exists: int (struct tdb_context *, TDB_DATA)
tdb_fd: int (struct tdb_context *)
tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_firstkey: TDB_DATA (struct tdb_context *)
tdb_freelist_size: int (struct tdb_context *)
tdb_get_flags: int (struct tdb_context *)
tdb_get_logging_private: void *(struct tdb_context *)
tdb_get_seqnum: int (struct tdb_context *)
tdb_hash_size: int (struct tdb_context *)
tdb_increment_seqnum_nonblock: void (struct tdb_context *)
tdb_jenkins_hash: unsigned int (TDB_DATA *)
tdb_lock_nonblock: int (struct tdb_context *, int, int)
tdb_lockall: int (struct tdb_context *)
tdb_lockall_mark: int (struct tdb_context *)
tdb_lockall_nonblock: int (struct tdb_context *)
tdb_lockall_read: int (struct tdb_context *)
tdb_lockall_read_nonblock: int (struct tdb_context *)
tdb_lockall_unmark: int (struct tdb_context *)
tdb_log_fn: tdb_log_func (struct tdb_context *)
tdb_map_size: size_t (struct tdb_context *)
tdb_name: const char *(struct tdb_context *)
tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
tdb_null: dptr = 0xXXXX, dsize = 0
tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_printfreelist: int (struct tdb_context *)
tdb_remove_flags: void (struct tdb_context *, unsigned int)
tdb_reopen: int (struct tdb_context *)
tdb_reopen_all: int (int)
tdb_repack: int (struct tdb_context *)
tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *)
tdb_runtime_check_for_robust_mutexes: bool (void)
tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
tdb_set_max_dead: void (struct tdb_context *, int)
tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
tdb_summary: char *(struct tdb_context *)
tdb_transaction_cancel: int (struct tdb_context *)
tdb_transaction_commit: int (struct tdb_context *)
tdb_transaction_prepare_commit: int (struct tdb_context *)
tdb_transaction_start: int (struct tdb_context *)
tdb_transaction_start_nonblock: int (struct tdb_context *)
tdb_transaction_write_lock_mark: int (struct tdb_context *)
tdb_transaction_write_lock_unmark: int (struct tdb_context *)
tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
tdb_unlock: int (struct tdb_context *, int, int)
tdb_unlockall: int (struct tdb_context *)
tdb_unlockall_read: int (struct tdb_context *)
tdb_validate_freelist: int (struct tdb_context *, int *)
tdb_wipe_all: int (struct tdb_context *)
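
tdb_chainlock_read_nonblock first appears in this file, completing the nonblocking variants: it returns failure immediately instead of sleeping when another process holds the chain. A sketch of an opportunistic read:

#include <tdb.h>

static int try_fetch(struct tdb_context *tdb, TDB_DATA key, TDB_DATA *out)
{
        if (tdb_chainlock_read_nonblock(tdb, key) != 0) {
                return -1;      /* chain is busy; caller retries later */
        }
        *out = tdb_fetch(tdb, key);     /* caller free()s out->dptr */
        tdb_chainunlock_read(tdb, key);
        return 0;
}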
tdb_lockall: int (struct tdb_context *) tdb_lockall_mark: int (struct tdb_context *) tdb_lockall_nonblock: int (struct tdb_context *) tdb_lockall_read: int (struct tdb_context *) tdb_lockall_read_nonblock: int (struct tdb_context *) tdb_lockall_unmark: int (struct tdb_context *) tdb_log_fn: tdb_log_func (struct tdb_context *) tdb_map_size: size_t (struct tdb_context *) tdb_name: const char *(struct tdb_context *) tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_null: dptr = 0xXXXX, dsize = 0 tdb_open: struct tdb_context *(const char *, int, int, int, mode_t) tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func) tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_printfreelist: int (struct tdb_context *) tdb_remove_flags: void (struct tdb_context *, unsigned int) tdb_reopen: int (struct tdb_context *) tdb_reopen_all: int (int) tdb_repack: int (struct tdb_context *) tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_runtime_check_for_robust_mutexes: bool (void) tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *) tdb_set_max_dead: void (struct tdb_context *, int) tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *) tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int) tdb_storev: int (struct tdb_context *, TDB_DATA, const TDB_DATA *, int, int) tdb_summary: char *(struct tdb_context *) tdb_transaction_cancel: int (struct tdb_context *) tdb_transaction_commit: int (struct tdb_context *) tdb_transaction_prepare_commit: int (struct tdb_context *) tdb_transaction_start: int (struct tdb_context *) tdb_transaction_start_nonblock: int (struct tdb_context *) tdb_transaction_write_lock_mark: int (struct tdb_context *) tdb_transaction_write_lock_unmark: int (struct tdb_context *) tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *) tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *) tdb_unlock: int (struct tdb_context *, int, int) tdb_unlockall: int (struct tdb_context *) tdb_unlockall_read: int (struct tdb_context *) tdb_validate_freelist: int (struct tdb_context *, int *) tdb_wipe_all: int (struct tdb_context *) tdb-1.4.2/ABI/tdb-1.3.12.sigs0000660000000000000000000000734713017565171015017 0ustar rootroot00000000000000tdb_add_flags: void (struct tdb_context *, unsigned int) tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA) tdb_chainlock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA) tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA) tdb_chainunlock: int (struct tdb_context *, TDB_DATA) tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA) tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_close: int (struct tdb_context *) tdb_delete: int (struct tdb_context *, TDB_DATA) tdb_dump_all: void (struct tdb_context *) tdb_enable_seqnum: void (struct tdb_context *) tdb_error: enum TDB_ERROR (struct tdb_context *) tdb_errorstr: const char *(struct tdb_context *) tdb_exists: int (struct tdb_context *, TDB_DATA) tdb_fd: int (struct tdb_context *) tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_firstkey: TDB_DATA (struct tdb_context *) 
tdb_freelist_size: int (struct tdb_context *) tdb_get_flags: int (struct tdb_context *) tdb_get_logging_private: void *(struct tdb_context *) tdb_get_seqnum: int (struct tdb_context *) tdb_hash_size: int (struct tdb_context *) tdb_increment_seqnum_nonblock: void (struct tdb_context *) tdb_jenkins_hash: unsigned int (TDB_DATA *) tdb_lock_nonblock: int (struct tdb_context *, int, int) tdb_lockall: int (struct tdb_context *) tdb_lockall_mark: int (struct tdb_context *) tdb_lockall_nonblock: int (struct tdb_context *) tdb_lockall_read: int (struct tdb_context *) tdb_lockall_read_nonblock: int (struct tdb_context *) tdb_lockall_unmark: int (struct tdb_context *) tdb_log_fn: tdb_log_func (struct tdb_context *) tdb_map_size: size_t (struct tdb_context *) tdb_name: const char *(struct tdb_context *) tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_null: dptr = 0xXXXX, dsize = 0 tdb_open: struct tdb_context *(const char *, int, int, int, mode_t) tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func) tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_printfreelist: int (struct tdb_context *) tdb_remove_flags: void (struct tdb_context *, unsigned int) tdb_reopen: int (struct tdb_context *) tdb_reopen_all: int (int) tdb_repack: int (struct tdb_context *) tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_runtime_check_for_robust_mutexes: bool (void) tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *) tdb_set_max_dead: void (struct tdb_context *, int) tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *) tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int) tdb_storev: int (struct tdb_context *, TDB_DATA, const TDB_DATA *, int, int) tdb_summary: char *(struct tdb_context *) tdb_transaction_cancel: int (struct tdb_context *) tdb_transaction_commit: int (struct tdb_context *) tdb_transaction_prepare_commit: int (struct tdb_context *) tdb_transaction_start: int (struct tdb_context *) tdb_transaction_start_nonblock: int (struct tdb_context *) tdb_transaction_write_lock_mark: int (struct tdb_context *) tdb_transaction_write_lock_unmark: int (struct tdb_context *) tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *) tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *) tdb_unlock: int (struct tdb_context *, int, int) tdb_unlockall: int (struct tdb_context *) tdb_unlockall_read: int (struct tdb_context *) tdb_validate_freelist: int (struct tdb_context *, int *) tdb_wipe_all: int (struct tdb_context *) tdb-1.4.2/ABI/tdb-1.3.13.sigs0000660000000000000000000000734713100601766015012 0ustar rootroot00000000000000tdb_add_flags: void (struct tdb_context *, unsigned int) tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA) tdb_chainlock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA) tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA) tdb_chainunlock: int (struct tdb_context *, TDB_DATA) tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA) tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_close: int (struct tdb_context *) tdb_delete: int (struct tdb_context *, TDB_DATA) 
tdb_dump_all: void (struct tdb_context *) tdb_enable_seqnum: void (struct tdb_context *) tdb_error: enum TDB_ERROR (struct tdb_context *) tdb_errorstr: const char *(struct tdb_context *) tdb_exists: int (struct tdb_context *, TDB_DATA) tdb_fd: int (struct tdb_context *) tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_firstkey: TDB_DATA (struct tdb_context *) tdb_freelist_size: int (struct tdb_context *) tdb_get_flags: int (struct tdb_context *) tdb_get_logging_private: void *(struct tdb_context *) tdb_get_seqnum: int (struct tdb_context *) tdb_hash_size: int (struct tdb_context *) tdb_increment_seqnum_nonblock: void (struct tdb_context *) tdb_jenkins_hash: unsigned int (TDB_DATA *) tdb_lock_nonblock: int (struct tdb_context *, int, int) tdb_lockall: int (struct tdb_context *) tdb_lockall_mark: int (struct tdb_context *) tdb_lockall_nonblock: int (struct tdb_context *) tdb_lockall_read: int (struct tdb_context *) tdb_lockall_read_nonblock: int (struct tdb_context *) tdb_lockall_unmark: int (struct tdb_context *) tdb_log_fn: tdb_log_func (struct tdb_context *) tdb_map_size: size_t (struct tdb_context *) tdb_name: const char *(struct tdb_context *) tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_null: dptr = 0xXXXX, dsize = 0 tdb_open: struct tdb_context *(const char *, int, int, int, mode_t) tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func) tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_printfreelist: int (struct tdb_context *) tdb_remove_flags: void (struct tdb_context *, unsigned int) tdb_reopen: int (struct tdb_context *) tdb_reopen_all: int (int) tdb_repack: int (struct tdb_context *) tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_runtime_check_for_robust_mutexes: bool (void) tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *) tdb_set_max_dead: void (struct tdb_context *, int) tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *) tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int) tdb_storev: int (struct tdb_context *, TDB_DATA, const TDB_DATA *, int, int) tdb_summary: char *(struct tdb_context *) tdb_transaction_cancel: int (struct tdb_context *) tdb_transaction_commit: int (struct tdb_context *) tdb_transaction_prepare_commit: int (struct tdb_context *) tdb_transaction_start: int (struct tdb_context *) tdb_transaction_start_nonblock: int (struct tdb_context *) tdb_transaction_write_lock_mark: int (struct tdb_context *) tdb_transaction_write_lock_unmark: int (struct tdb_context *) tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *) tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *) tdb_unlock: int (struct tdb_context *, int, int) tdb_unlockall: int (struct tdb_context *) tdb_unlockall_read: int (struct tdb_context *) tdb_validate_freelist: int (struct tdb_context *, int *) tdb_wipe_all: int (struct tdb_context *) tdb-1.4.2/ABI/tdb-1.3.14.sigs0000660000000000000000000000743313126252766015021 0ustar rootroot00000000000000tdb_add_flags: void (struct tdb_context *, unsigned int) tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA) tdb_chainlock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA) tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read_nonblock: int 
(struct tdb_context *, TDB_DATA) tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA) tdb_chainunlock: int (struct tdb_context *, TDB_DATA) tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA) tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_close: int (struct tdb_context *) tdb_delete: int (struct tdb_context *, TDB_DATA) tdb_dump_all: void (struct tdb_context *) tdb_enable_seqnum: void (struct tdb_context *) tdb_error: enum TDB_ERROR (struct tdb_context *) tdb_errorstr: const char *(struct tdb_context *) tdb_exists: int (struct tdb_context *, TDB_DATA) tdb_fd: int (struct tdb_context *) tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_firstkey: TDB_DATA (struct tdb_context *) tdb_freelist_size: int (struct tdb_context *) tdb_get_flags: int (struct tdb_context *) tdb_get_logging_private: void *(struct tdb_context *) tdb_get_seqnum: int (struct tdb_context *) tdb_hash_size: int (struct tdb_context *) tdb_increment_seqnum_nonblock: void (struct tdb_context *) tdb_jenkins_hash: unsigned int (TDB_DATA *) tdb_lock_nonblock: int (struct tdb_context *, int, int) tdb_lockall: int (struct tdb_context *) tdb_lockall_mark: int (struct tdb_context *) tdb_lockall_nonblock: int (struct tdb_context *) tdb_lockall_read: int (struct tdb_context *) tdb_lockall_read_nonblock: int (struct tdb_context *) tdb_lockall_unmark: int (struct tdb_context *) tdb_log_fn: tdb_log_func (struct tdb_context *) tdb_map_size: size_t (struct tdb_context *) tdb_name: const char *(struct tdb_context *) tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_null: dptr = 0xXXXX, dsize = 0 tdb_open: struct tdb_context *(const char *, int, int, int, mode_t) tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func) tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_printfreelist: int (struct tdb_context *) tdb_remove_flags: void (struct tdb_context *, unsigned int) tdb_reopen: int (struct tdb_context *) tdb_reopen_all: int (int) tdb_repack: int (struct tdb_context *) tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_runtime_check_for_robust_mutexes: bool (void) tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *) tdb_set_max_dead: void (struct tdb_context *, int) tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *) tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int) tdb_storev: int (struct tdb_context *, TDB_DATA, const TDB_DATA *, int, int) tdb_summary: char *(struct tdb_context *) tdb_transaction_active: bool (struct tdb_context *) tdb_transaction_cancel: int (struct tdb_context *) tdb_transaction_commit: int (struct tdb_context *) tdb_transaction_prepare_commit: int (struct tdb_context *) tdb_transaction_start: int (struct tdb_context *) tdb_transaction_start_nonblock: int (struct tdb_context *) tdb_transaction_write_lock_mark: int (struct tdb_context *) tdb_transaction_write_lock_unmark: int (struct tdb_context *) tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *) tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *) tdb_unlock: int (struct tdb_context *, int, int) tdb_unlockall: int (struct tdb_context *) tdb_unlockall_read: int (struct tdb_context *) tdb_validate_freelist: int (struct tdb_context *, int *) tdb_wipe_all: int (struct tdb_context *) 
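The signature manifests above and below record the public tdb ABI version by version. As a minimal sketch of how that API fits together, the C program below exercises the core calls whose signatures appear in every manifest here (tdb_open, tdb_store, tdb_fetch, tdb_transaction_start/commit/cancel, tdb_traverse, tdb_errorstr, tdb_close). It assumes the standard <tdb.h> header and the flag constants it defines (TDB_DEFAULT, TDB_REPLACE); the database path "example.tdb" is purely illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <tdb.h>

/* Callback matching tdb_traverse_func:
   int (struct tdb_context *, TDB_DATA, TDB_DATA, void *) */
static int print_rec(struct tdb_context *tdb, TDB_DATA key, TDB_DATA val,
                     void *priv)
{
        (void)tdb; (void)priv;
        printf("%.*s = %.*s\n", (int)key.dsize, key.dptr,
               (int)val.dsize, val.dptr);
        return 0;               /* non-zero would stop the traversal */
}

int main(void)
{
        /* tdb_open(name, hash_size, tdb_flags, open_flags, mode);
           a hash_size of 0 selects the library default. */
        struct tdb_context *tdb = tdb_open("example.tdb", 0, TDB_DEFAULT,
                                           O_RDWR | O_CREAT, 0600);
        if (tdb == NULL) {
                perror("tdb_open");
                return 1;
        }

        TDB_DATA key = { .dptr = (unsigned char *)"hello", .dsize = 5 };
        TDB_DATA val = { .dptr = (unsigned char *)"world", .dsize = 5 };

        /* Group the update in a transaction so it commits atomically. */
        if (tdb_transaction_start(tdb) != 0)
                goto fail;
        if (tdb_store(tdb, key, val, TDB_REPLACE) != 0) {
                tdb_transaction_cancel(tdb);
                goto fail;
        }
        if (tdb_transaction_commit(tdb) != 0)
                goto fail;

        /* tdb_fetch returns a malloc'd copy; the caller frees dptr. */
        TDB_DATA out = tdb_fetch(tdb, key);
        if (out.dptr != NULL) {
                printf("fetched: %.*s\n", (int)out.dsize, out.dptr);
                free(out.dptr);
        }

        tdb_traverse(tdb, print_rec, NULL);     /* visit every record */
        tdb_close(tdb);
        return 0;

fail:
        fprintf(stderr, "tdb error: %s\n", tdb_errorstr(tdb));
        tdb_close(tdb);
        return 1;
}

Note how the ABI lists above make the compatibility story visible: this sketch uses only entries already present in tdb-1.2.1, while later manifests add calls such as tdb_storev (1.3.11), tdb_transaction_active (1.3.14), and tdb_traverse_chain/tdb_traverse_key_chain (1.3.17) without changing any existing signature.
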
tdb-1.4.2/ABI/tdb-1.3.15.sigs0000660000000000000000000000743313444661620015016 0ustar rootroot00000000000000tdb_add_flags: void (struct tdb_context *, unsigned int) tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA) tdb_chainlock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA) tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA) tdb_chainunlock: int (struct tdb_context *, TDB_DATA) tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA) tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_close: int (struct tdb_context *) tdb_delete: int (struct tdb_context *, TDB_DATA) tdb_dump_all: void (struct tdb_context *) tdb_enable_seqnum: void (struct tdb_context *) tdb_error: enum TDB_ERROR (struct tdb_context *) tdb_errorstr: const char *(struct tdb_context *) tdb_exists: int (struct tdb_context *, TDB_DATA) tdb_fd: int (struct tdb_context *) tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_firstkey: TDB_DATA (struct tdb_context *) tdb_freelist_size: int (struct tdb_context *) tdb_get_flags: int (struct tdb_context *) tdb_get_logging_private: void *(struct tdb_context *) tdb_get_seqnum: int (struct tdb_context *) tdb_hash_size: int (struct tdb_context *) tdb_increment_seqnum_nonblock: void (struct tdb_context *) tdb_jenkins_hash: unsigned int (TDB_DATA *) tdb_lock_nonblock: int (struct tdb_context *, int, int) tdb_lockall: int (struct tdb_context *) tdb_lockall_mark: int (struct tdb_context *) tdb_lockall_nonblock: int (struct tdb_context *) tdb_lockall_read: int (struct tdb_context *) tdb_lockall_read_nonblock: int (struct tdb_context *) tdb_lockall_unmark: int (struct tdb_context *) tdb_log_fn: tdb_log_func (struct tdb_context *) tdb_map_size: size_t (struct tdb_context *) tdb_name: const char *(struct tdb_context *) tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_null: dptr = 0xXXXX, dsize = 0 tdb_open: struct tdb_context *(const char *, int, int, int, mode_t) tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func) tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_printfreelist: int (struct tdb_context *) tdb_remove_flags: void (struct tdb_context *, unsigned int) tdb_reopen: int (struct tdb_context *) tdb_reopen_all: int (int) tdb_repack: int (struct tdb_context *) tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_runtime_check_for_robust_mutexes: bool (void) tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *) tdb_set_max_dead: void (struct tdb_context *, int) tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *) tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int) tdb_storev: int (struct tdb_context *, TDB_DATA, const TDB_DATA *, int, int) tdb_summary: char *(struct tdb_context *) tdb_transaction_active: bool (struct tdb_context *) tdb_transaction_cancel: int (struct tdb_context *) tdb_transaction_commit: int (struct tdb_context *) tdb_transaction_prepare_commit: int (struct tdb_context *) tdb_transaction_start: int (struct tdb_context *) tdb_transaction_start_nonblock: int (struct tdb_context *) tdb_transaction_write_lock_mark: int (struct tdb_context *) 
tdb_transaction_write_lock_unmark: int (struct tdb_context *) tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *) tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *) tdb_unlock: int (struct tdb_context *, int, int) tdb_unlockall: int (struct tdb_context *) tdb_unlockall_read: int (struct tdb_context *) tdb_validate_freelist: int (struct tdb_context *, int *) tdb_wipe_all: int (struct tdb_context *) tdb-1.4.2/ABI/tdb-1.3.16.sigs0000660000000000000000000000743313444661620015017 0ustar rootroot00000000000000tdb_add_flags: void (struct tdb_context *, unsigned int) tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA) tdb_chainlock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA) tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA) tdb_chainunlock: int (struct tdb_context *, TDB_DATA) tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA) tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_close: int (struct tdb_context *) tdb_delete: int (struct tdb_context *, TDB_DATA) tdb_dump_all: void (struct tdb_context *) tdb_enable_seqnum: void (struct tdb_context *) tdb_error: enum TDB_ERROR (struct tdb_context *) tdb_errorstr: const char *(struct tdb_context *) tdb_exists: int (struct tdb_context *, TDB_DATA) tdb_fd: int (struct tdb_context *) tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_firstkey: TDB_DATA (struct tdb_context *) tdb_freelist_size: int (struct tdb_context *) tdb_get_flags: int (struct tdb_context *) tdb_get_logging_private: void *(struct tdb_context *) tdb_get_seqnum: int (struct tdb_context *) tdb_hash_size: int (struct tdb_context *) tdb_increment_seqnum_nonblock: void (struct tdb_context *) tdb_jenkins_hash: unsigned int (TDB_DATA *) tdb_lock_nonblock: int (struct tdb_context *, int, int) tdb_lockall: int (struct tdb_context *) tdb_lockall_mark: int (struct tdb_context *) tdb_lockall_nonblock: int (struct tdb_context *) tdb_lockall_read: int (struct tdb_context *) tdb_lockall_read_nonblock: int (struct tdb_context *) tdb_lockall_unmark: int (struct tdb_context *) tdb_log_fn: tdb_log_func (struct tdb_context *) tdb_map_size: size_t (struct tdb_context *) tdb_name: const char *(struct tdb_context *) tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_null: dptr = 0xXXXX, dsize = 0 tdb_open: struct tdb_context *(const char *, int, int, int, mode_t) tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func) tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_printfreelist: int (struct tdb_context *) tdb_remove_flags: void (struct tdb_context *, unsigned int) tdb_reopen: int (struct tdb_context *) tdb_reopen_all: int (int) tdb_repack: int (struct tdb_context *) tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_runtime_check_for_robust_mutexes: bool (void) tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *) tdb_set_max_dead: void (struct tdb_context *, int) tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *) tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int) tdb_storev: int (struct tdb_context *, TDB_DATA, const TDB_DATA *, int, int) 
tdb_summary: char *(struct tdb_context *) tdb_transaction_active: bool (struct tdb_context *) tdb_transaction_cancel: int (struct tdb_context *) tdb_transaction_commit: int (struct tdb_context *) tdb_transaction_prepare_commit: int (struct tdb_context *) tdb_transaction_start: int (struct tdb_context *) tdb_transaction_start_nonblock: int (struct tdb_context *) tdb_transaction_write_lock_mark: int (struct tdb_context *) tdb_transaction_write_lock_unmark: int (struct tdb_context *) tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *) tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *) tdb_unlock: int (struct tdb_context *, int, int) tdb_unlockall: int (struct tdb_context *) tdb_unlockall_read: int (struct tdb_context *) tdb_validate_freelist: int (struct tdb_context *, int *) tdb_wipe_all: int (struct tdb_context *) tdb-1.4.2/ABI/tdb-1.3.17.sigs0000660000000000000000000000771313444661620015021 0ustar rootroot00000000000000tdb_add_flags: void (struct tdb_context *, unsigned int) tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA) tdb_chainlock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA) tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA) tdb_chainunlock: int (struct tdb_context *, TDB_DATA) tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA) tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_close: int (struct tdb_context *) tdb_delete: int (struct tdb_context *, TDB_DATA) tdb_dump_all: void (struct tdb_context *) tdb_enable_seqnum: void (struct tdb_context *) tdb_error: enum TDB_ERROR (struct tdb_context *) tdb_errorstr: const char *(struct tdb_context *) tdb_exists: int (struct tdb_context *, TDB_DATA) tdb_fd: int (struct tdb_context *) tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_firstkey: TDB_DATA (struct tdb_context *) tdb_freelist_size: int (struct tdb_context *) tdb_get_flags: int (struct tdb_context *) tdb_get_logging_private: void *(struct tdb_context *) tdb_get_seqnum: int (struct tdb_context *) tdb_hash_size: int (struct tdb_context *) tdb_increment_seqnum_nonblock: void (struct tdb_context *) tdb_jenkins_hash: unsigned int (TDB_DATA *) tdb_lock_nonblock: int (struct tdb_context *, int, int) tdb_lockall: int (struct tdb_context *) tdb_lockall_mark: int (struct tdb_context *) tdb_lockall_nonblock: int (struct tdb_context *) tdb_lockall_read: int (struct tdb_context *) tdb_lockall_read_nonblock: int (struct tdb_context *) tdb_lockall_unmark: int (struct tdb_context *) tdb_log_fn: tdb_log_func (struct tdb_context *) tdb_map_size: size_t (struct tdb_context *) tdb_name: const char *(struct tdb_context *) tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_null: dptr = 0xXXXX, dsize = 0 tdb_open: struct tdb_context *(const char *, int, int, int, mode_t) tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func) tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_printfreelist: int (struct tdb_context *) tdb_remove_flags: void (struct tdb_context *, unsigned int) tdb_reopen: int (struct tdb_context *) tdb_reopen_all: int (int) tdb_repack: int (struct tdb_context *) tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, 
void *), void *) tdb_runtime_check_for_robust_mutexes: bool (void) tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *) tdb_set_max_dead: void (struct tdb_context *, int) tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *) tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int) tdb_storev: int (struct tdb_context *, TDB_DATA, const TDB_DATA *, int, int) tdb_summary: char *(struct tdb_context *) tdb_transaction_active: bool (struct tdb_context *) tdb_transaction_cancel: int (struct tdb_context *) tdb_transaction_commit: int (struct tdb_context *) tdb_transaction_prepare_commit: int (struct tdb_context *) tdb_transaction_start: int (struct tdb_context *) tdb_transaction_start_nonblock: int (struct tdb_context *) tdb_transaction_write_lock_mark: int (struct tdb_context *) tdb_transaction_write_lock_unmark: int (struct tdb_context *) tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *) tdb_traverse_chain: int (struct tdb_context *, unsigned int, tdb_traverse_func, void *) tdb_traverse_key_chain: int (struct tdb_context *, TDB_DATA, tdb_traverse_func, void *) tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *) tdb_unlock: int (struct tdb_context *, int, int) tdb_unlockall: int (struct tdb_context *) tdb_unlockall_read: int (struct tdb_context *) tdb_validate_freelist: int (struct tdb_context *, int *) tdb_wipe_all: int (struct tdb_context *) tdb-1.4.2/ABI/tdb-1.3.18.sigs0000660000000000000000000000771313444661620015022 0ustar rootroot00000000000000tdb_add_flags: void (struct tdb_context *, unsigned int) tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA) tdb_chainlock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA) tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA) tdb_chainunlock: int (struct tdb_context *, TDB_DATA) tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA) tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_close: int (struct tdb_context *) tdb_delete: int (struct tdb_context *, TDB_DATA) tdb_dump_all: void (struct tdb_context *) tdb_enable_seqnum: void (struct tdb_context *) tdb_error: enum TDB_ERROR (struct tdb_context *) tdb_errorstr: const char *(struct tdb_context *) tdb_exists: int (struct tdb_context *, TDB_DATA) tdb_fd: int (struct tdb_context *) tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_firstkey: TDB_DATA (struct tdb_context *) tdb_freelist_size: int (struct tdb_context *) tdb_get_flags: int (struct tdb_context *) tdb_get_logging_private: void *(struct tdb_context *) tdb_get_seqnum: int (struct tdb_context *) tdb_hash_size: int (struct tdb_context *) tdb_increment_seqnum_nonblock: void (struct tdb_context *) tdb_jenkins_hash: unsigned int (TDB_DATA *) tdb_lock_nonblock: int (struct tdb_context *, int, int) tdb_lockall: int (struct tdb_context *) tdb_lockall_mark: int (struct tdb_context *) tdb_lockall_nonblock: int (struct tdb_context *) tdb_lockall_read: int (struct tdb_context *) tdb_lockall_read_nonblock: int (struct tdb_context *) tdb_lockall_unmark: int (struct tdb_context *) tdb_log_fn: tdb_log_func (struct tdb_context *) tdb_map_size: size_t (struct tdb_context *) tdb_name: const char *(struct tdb_context *) tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA) 
tdb_null: dptr = 0xXXXX, dsize = 0 tdb_open: struct tdb_context *(const char *, int, int, int, mode_t) tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func) tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_printfreelist: int (struct tdb_context *) tdb_remove_flags: void (struct tdb_context *, unsigned int) tdb_reopen: int (struct tdb_context *) tdb_reopen_all: int (int) tdb_repack: int (struct tdb_context *) tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_runtime_check_for_robust_mutexes: bool (void) tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *) tdb_set_max_dead: void (struct tdb_context *, int) tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *) tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int) tdb_storev: int (struct tdb_context *, TDB_DATA, const TDB_DATA *, int, int) tdb_summary: char *(struct tdb_context *) tdb_transaction_active: bool (struct tdb_context *) tdb_transaction_cancel: int (struct tdb_context *) tdb_transaction_commit: int (struct tdb_context *) tdb_transaction_prepare_commit: int (struct tdb_context *) tdb_transaction_start: int (struct tdb_context *) tdb_transaction_start_nonblock: int (struct tdb_context *) tdb_transaction_write_lock_mark: int (struct tdb_context *) tdb_transaction_write_lock_unmark: int (struct tdb_context *) tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *) tdb_traverse_chain: int (struct tdb_context *, unsigned int, tdb_traverse_func, void *) tdb_traverse_key_chain: int (struct tdb_context *, TDB_DATA, tdb_traverse_func, void *) tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *) tdb_unlock: int (struct tdb_context *, int, int) tdb_unlockall: int (struct tdb_context *) tdb_unlockall_read: int (struct tdb_context *) tdb_validate_freelist: int (struct tdb_context *, int *) tdb_wipe_all: int (struct tdb_context *) tdb-1.4.2/ABI/tdb-1.3.2.sigs0000660000000000000000000000713012436323671014725 0ustar rootroot00000000000000tdb_add_flags: void (struct tdb_context *, unsigned int) tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA) tdb_chainlock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA) tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read: int (struct tdb_context *, TDB_DATA) tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA) tdb_chainunlock: int (struct tdb_context *, TDB_DATA) tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA) tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_close: int (struct tdb_context *) tdb_delete: int (struct tdb_context *, TDB_DATA) tdb_dump_all: void (struct tdb_context *) tdb_enable_seqnum: void (struct tdb_context *) tdb_error: enum TDB_ERROR (struct tdb_context *) tdb_errorstr: const char *(struct tdb_context *) tdb_exists: int (struct tdb_context *, TDB_DATA) tdb_fd: int (struct tdb_context *) tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_firstkey: TDB_DATA (struct tdb_context *) tdb_freelist_size: int (struct tdb_context *) tdb_get_flags: int (struct tdb_context *) tdb_get_logging_private: void *(struct tdb_context *) tdb_get_seqnum: int (struct tdb_context *) tdb_hash_size: int (struct tdb_context *) tdb_increment_seqnum_nonblock: void (struct tdb_context *) tdb_jenkins_hash: unsigned int (TDB_DATA 
*) tdb_lock_nonblock: int (struct tdb_context *, int, int) tdb_lockall: int (struct tdb_context *) tdb_lockall_mark: int (struct tdb_context *) tdb_lockall_nonblock: int (struct tdb_context *) tdb_lockall_read: int (struct tdb_context *) tdb_lockall_read_nonblock: int (struct tdb_context *) tdb_lockall_unmark: int (struct tdb_context *) tdb_log_fn: tdb_log_func (struct tdb_context *) tdb_map_size: size_t (struct tdb_context *) tdb_name: const char *(struct tdb_context *) tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_null: dptr = 0xXXXX, dsize = 0 tdb_open: struct tdb_context *(const char *, int, int, int, mode_t) tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func) tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_printfreelist: int (struct tdb_context *) tdb_remove_flags: void (struct tdb_context *, unsigned int) tdb_reopen: int (struct tdb_context *) tdb_reopen_all: int (int) tdb_repack: int (struct tdb_context *) tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_runtime_check_for_robust_mutexes: bool (void) tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *) tdb_set_max_dead: void (struct tdb_context *, int) tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *) tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int) tdb_summary: char *(struct tdb_context *) tdb_transaction_cancel: int (struct tdb_context *) tdb_transaction_commit: int (struct tdb_context *) tdb_transaction_prepare_commit: int (struct tdb_context *) tdb_transaction_start: int (struct tdb_context *) tdb_transaction_start_nonblock: int (struct tdb_context *) tdb_transaction_write_lock_mark: int (struct tdb_context *) tdb_transaction_write_lock_unmark: int (struct tdb_context *) tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *) tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *) tdb_unlock: int (struct tdb_context *, int, int) tdb_unlockall: int (struct tdb_context *) tdb_unlockall_read: int (struct tdb_context *) tdb_validate_freelist: int (struct tdb_context *, int *) tdb_wipe_all: int (struct tdb_context *) tdb-1.4.2/ABI/tdb-1.3.3.sigs0000660000000000000000000000713012437274221014723 0ustar rootroot00000000000000tdb_add_flags: void (struct tdb_context *, unsigned int) tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA) tdb_chainlock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA) tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read: int (struct tdb_context *, TDB_DATA) tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA) tdb_chainunlock: int (struct tdb_context *, TDB_DATA) tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA) tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_close: int (struct tdb_context *) tdb_delete: int (struct tdb_context *, TDB_DATA) tdb_dump_all: void (struct tdb_context *) tdb_enable_seqnum: void (struct tdb_context *) tdb_error: enum TDB_ERROR (struct tdb_context *) tdb_errorstr: const char *(struct tdb_context *) tdb_exists: int (struct tdb_context *, TDB_DATA) tdb_fd: int (struct tdb_context *) tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_firstkey: TDB_DATA (struct tdb_context *) tdb_freelist_size: int (struct tdb_context *) tdb_get_flags: int (struct tdb_context *) 
tdb_get_logging_private: void *(struct tdb_context *) tdb_get_seqnum: int (struct tdb_context *) tdb_hash_size: int (struct tdb_context *) tdb_increment_seqnum_nonblock: void (struct tdb_context *) tdb_jenkins_hash: unsigned int (TDB_DATA *) tdb_lock_nonblock: int (struct tdb_context *, int, int) tdb_lockall: int (struct tdb_context *) tdb_lockall_mark: int (struct tdb_context *) tdb_lockall_nonblock: int (struct tdb_context *) tdb_lockall_read: int (struct tdb_context *) tdb_lockall_read_nonblock: int (struct tdb_context *) tdb_lockall_unmark: int (struct tdb_context *) tdb_log_fn: tdb_log_func (struct tdb_context *) tdb_map_size: size_t (struct tdb_context *) tdb_name: const char *(struct tdb_context *) tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_null: dptr = 0xXXXX, dsize = 0 tdb_open: struct tdb_context *(const char *, int, int, int, mode_t) tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func) tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_printfreelist: int (struct tdb_context *) tdb_remove_flags: void (struct tdb_context *, unsigned int) tdb_reopen: int (struct tdb_context *) tdb_reopen_all: int (int) tdb_repack: int (struct tdb_context *) tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_runtime_check_for_robust_mutexes: bool (void) tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *) tdb_set_max_dead: void (struct tdb_context *, int) tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *) tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int) tdb_summary: char *(struct tdb_context *) tdb_transaction_cancel: int (struct tdb_context *) tdb_transaction_commit: int (struct tdb_context *) tdb_transaction_prepare_commit: int (struct tdb_context *) tdb_transaction_start: int (struct tdb_context *) tdb_transaction_start_nonblock: int (struct tdb_context *) tdb_transaction_write_lock_mark: int (struct tdb_context *) tdb_transaction_write_lock_unmark: int (struct tdb_context *) tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *) tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *) tdb_unlock: int (struct tdb_context *, int, int) tdb_unlockall: int (struct tdb_context *) tdb_unlockall_read: int (struct tdb_context *) tdb_validate_freelist: int (struct tdb_context *, int *) tdb_wipe_all: int (struct tdb_context *) tdb-1.4.2/ABI/tdb-1.3.4.sigs0000660000000000000000000000713012445751350014726 0ustar rootroot00000000000000tdb_add_flags: void (struct tdb_context *, unsigned int) tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA) tdb_chainlock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA) tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read: int (struct tdb_context *, TDB_DATA) tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA) tdb_chainunlock: int (struct tdb_context *, TDB_DATA) tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA) tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_close: int (struct tdb_context *) tdb_delete: int (struct tdb_context *, TDB_DATA) tdb_dump_all: void (struct tdb_context *) tdb_enable_seqnum: void (struct tdb_context *) tdb_error: enum TDB_ERROR (struct tdb_context *) tdb_errorstr: const char *(struct tdb_context *) tdb_exists: int (struct tdb_context *, 
TDB_DATA) tdb_fd: int (struct tdb_context *) tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_firstkey: TDB_DATA (struct tdb_context *) tdb_freelist_size: int (struct tdb_context *) tdb_get_flags: int (struct tdb_context *) tdb_get_logging_private: void *(struct tdb_context *) tdb_get_seqnum: int (struct tdb_context *) tdb_hash_size: int (struct tdb_context *) tdb_increment_seqnum_nonblock: void (struct tdb_context *) tdb_jenkins_hash: unsigned int (TDB_DATA *) tdb_lock_nonblock: int (struct tdb_context *, int, int) tdb_lockall: int (struct tdb_context *) tdb_lockall_mark: int (struct tdb_context *) tdb_lockall_nonblock: int (struct tdb_context *) tdb_lockall_read: int (struct tdb_context *) tdb_lockall_read_nonblock: int (struct tdb_context *) tdb_lockall_unmark: int (struct tdb_context *) tdb_log_fn: tdb_log_func (struct tdb_context *) tdb_map_size: size_t (struct tdb_context *) tdb_name: const char *(struct tdb_context *) tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_null: dptr = 0xXXXX, dsize = 0 tdb_open: struct tdb_context *(const char *, int, int, int, mode_t) tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func) tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_printfreelist: int (struct tdb_context *) tdb_remove_flags: void (struct tdb_context *, unsigned int) tdb_reopen: int (struct tdb_context *) tdb_reopen_all: int (int) tdb_repack: int (struct tdb_context *) tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_runtime_check_for_robust_mutexes: bool (void) tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *) tdb_set_max_dead: void (struct tdb_context *, int) tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *) tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int) tdb_summary: char *(struct tdb_context *) tdb_transaction_cancel: int (struct tdb_context *) tdb_transaction_commit: int (struct tdb_context *) tdb_transaction_prepare_commit: int (struct tdb_context *) tdb_transaction_start: int (struct tdb_context *) tdb_transaction_start_nonblock: int (struct tdb_context *) tdb_transaction_write_lock_mark: int (struct tdb_context *) tdb_transaction_write_lock_unmark: int (struct tdb_context *) tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *) tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *) tdb_unlock: int (struct tdb_context *, int, int) tdb_unlockall: int (struct tdb_context *) tdb_unlockall_read: int (struct tdb_context *) tdb_validate_freelist: int (struct tdb_context *, int *) tdb_wipe_all: int (struct tdb_context *) tdb-1.4.2/ABI/tdb-1.3.5.sigs0000660000000000000000000000723212520121120014706 0ustar rootroot00000000000000tdb_add_flags: void (struct tdb_context *, unsigned int) tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA) tdb_chainlock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA) tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA) tdb_chainunlock: int (struct tdb_context *, TDB_DATA) tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA) tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_close: int (struct 
tdb_context *) tdb_delete: int (struct tdb_context *, TDB_DATA) tdb_dump_all: void (struct tdb_context *) tdb_enable_seqnum: void (struct tdb_context *) tdb_error: enum TDB_ERROR (struct tdb_context *) tdb_errorstr: const char *(struct tdb_context *) tdb_exists: int (struct tdb_context *, TDB_DATA) tdb_fd: int (struct tdb_context *) tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_firstkey: TDB_DATA (struct tdb_context *) tdb_freelist_size: int (struct tdb_context *) tdb_get_flags: int (struct tdb_context *) tdb_get_logging_private: void *(struct tdb_context *) tdb_get_seqnum: int (struct tdb_context *) tdb_hash_size: int (struct tdb_context *) tdb_increment_seqnum_nonblock: void (struct tdb_context *) tdb_jenkins_hash: unsigned int (TDB_DATA *) tdb_lock_nonblock: int (struct tdb_context *, int, int) tdb_lockall: int (struct tdb_context *) tdb_lockall_mark: int (struct tdb_context *) tdb_lockall_nonblock: int (struct tdb_context *) tdb_lockall_read: int (struct tdb_context *) tdb_lockall_read_nonblock: int (struct tdb_context *) tdb_lockall_unmark: int (struct tdb_context *) tdb_log_fn: tdb_log_func (struct tdb_context *) tdb_map_size: size_t (struct tdb_context *) tdb_name: const char *(struct tdb_context *) tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_null: dptr = 0xXXXX, dsize = 0 tdb_open: struct tdb_context *(const char *, int, int, int, mode_t) tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func) tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_printfreelist: int (struct tdb_context *) tdb_remove_flags: void (struct tdb_context *, unsigned int) tdb_reopen: int (struct tdb_context *) tdb_reopen_all: int (int) tdb_repack: int (struct tdb_context *) tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_runtime_check_for_robust_mutexes: bool (void) tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *) tdb_set_max_dead: void (struct tdb_context *, int) tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *) tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int) tdb_summary: char *(struct tdb_context *) tdb_transaction_cancel: int (struct tdb_context *) tdb_transaction_commit: int (struct tdb_context *) tdb_transaction_prepare_commit: int (struct tdb_context *) tdb_transaction_start: int (struct tdb_context *) tdb_transaction_start_nonblock: int (struct tdb_context *) tdb_transaction_write_lock_mark: int (struct tdb_context *) tdb_transaction_write_lock_unmark: int (struct tdb_context *) tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *) tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *) tdb_unlock: int (struct tdb_context *, int, int) tdb_unlockall: int (struct tdb_context *) tdb_unlockall_read: int (struct tdb_context *) tdb_validate_freelist: int (struct tdb_context *, int *) tdb_wipe_all: int (struct tdb_context *) tdb-1.4.2/ABI/tdb-1.3.6.sigs0000660000000000000000000000723212536700353014731 0ustar rootroot00000000000000tdb_add_flags: void (struct tdb_context *, unsigned int) tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA) tdb_chainlock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA) tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read_nonblock: int (struct tdb_context 
*, TDB_DATA) tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA) tdb_chainunlock: int (struct tdb_context *, TDB_DATA) tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA) tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_close: int (struct tdb_context *) tdb_delete: int (struct tdb_context *, TDB_DATA) tdb_dump_all: void (struct tdb_context *) tdb_enable_seqnum: void (struct tdb_context *) tdb_error: enum TDB_ERROR (struct tdb_context *) tdb_errorstr: const char *(struct tdb_context *) tdb_exists: int (struct tdb_context *, TDB_DATA) tdb_fd: int (struct tdb_context *) tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_firstkey: TDB_DATA (struct tdb_context *) tdb_freelist_size: int (struct tdb_context *) tdb_get_flags: int (struct tdb_context *) tdb_get_logging_private: void *(struct tdb_context *) tdb_get_seqnum: int (struct tdb_context *) tdb_hash_size: int (struct tdb_context *) tdb_increment_seqnum_nonblock: void (struct tdb_context *) tdb_jenkins_hash: unsigned int (TDB_DATA *) tdb_lock_nonblock: int (struct tdb_context *, int, int) tdb_lockall: int (struct tdb_context *) tdb_lockall_mark: int (struct tdb_context *) tdb_lockall_nonblock: int (struct tdb_context *) tdb_lockall_read: int (struct tdb_context *) tdb_lockall_read_nonblock: int (struct tdb_context *) tdb_lockall_unmark: int (struct tdb_context *) tdb_log_fn: tdb_log_func (struct tdb_context *) tdb_map_size: size_t (struct tdb_context *) tdb_name: const char *(struct tdb_context *) tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_null: dptr = 0xXXXX, dsize = 0 tdb_open: struct tdb_context *(const char *, int, int, int, mode_t) tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func) tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_printfreelist: int (struct tdb_context *) tdb_remove_flags: void (struct tdb_context *, unsigned int) tdb_reopen: int (struct tdb_context *) tdb_reopen_all: int (int) tdb_repack: int (struct tdb_context *) tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_runtime_check_for_robust_mutexes: bool (void) tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *) tdb_set_max_dead: void (struct tdb_context *, int) tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *) tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int) tdb_summary: char *(struct tdb_context *) tdb_transaction_cancel: int (struct tdb_context *) tdb_transaction_commit: int (struct tdb_context *) tdb_transaction_prepare_commit: int (struct tdb_context *) tdb_transaction_start: int (struct tdb_context *) tdb_transaction_start_nonblock: int (struct tdb_context *) tdb_transaction_write_lock_mark: int (struct tdb_context *) tdb_transaction_write_lock_unmark: int (struct tdb_context *) tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *) tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *) tdb_unlock: int (struct tdb_context *, int, int) tdb_unlockall: int (struct tdb_context *) tdb_unlockall_read: int (struct tdb_context *) tdb_validate_freelist: int (struct tdb_context *, int *) tdb_wipe_all: int (struct tdb_context *) tdb-1.4.2/ABI/tdb-1.3.7.sigs0000660000000000000000000000723212553526406014736 0ustar rootroot00000000000000tdb_add_flags: void (struct tdb_context *, unsigned int) tdb_append: int (struct tdb_context *, TDB_DATA, 
TDB_DATA) tdb_chainlock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA) tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA) tdb_chainunlock: int (struct tdb_context *, TDB_DATA) tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA) tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_close: int (struct tdb_context *) tdb_delete: int (struct tdb_context *, TDB_DATA) tdb_dump_all: void (struct tdb_context *) tdb_enable_seqnum: void (struct tdb_context *) tdb_error: enum TDB_ERROR (struct tdb_context *) tdb_errorstr: const char *(struct tdb_context *) tdb_exists: int (struct tdb_context *, TDB_DATA) tdb_fd: int (struct tdb_context *) tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_firstkey: TDB_DATA (struct tdb_context *) tdb_freelist_size: int (struct tdb_context *) tdb_get_flags: int (struct tdb_context *) tdb_get_logging_private: void *(struct tdb_context *) tdb_get_seqnum: int (struct tdb_context *) tdb_hash_size: int (struct tdb_context *) tdb_increment_seqnum_nonblock: void (struct tdb_context *) tdb_jenkins_hash: unsigned int (TDB_DATA *) tdb_lock_nonblock: int (struct tdb_context *, int, int) tdb_lockall: int (struct tdb_context *) tdb_lockall_mark: int (struct tdb_context *) tdb_lockall_nonblock: int (struct tdb_context *) tdb_lockall_read: int (struct tdb_context *) tdb_lockall_read_nonblock: int (struct tdb_context *) tdb_lockall_unmark: int (struct tdb_context *) tdb_log_fn: tdb_log_func (struct tdb_context *) tdb_map_size: size_t (struct tdb_context *) tdb_name: const char *(struct tdb_context *) tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_null: dptr = 0xXXXX, dsize = 0 tdb_open: struct tdb_context *(const char *, int, int, int, mode_t) tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func) tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_printfreelist: int (struct tdb_context *) tdb_remove_flags: void (struct tdb_context *, unsigned int) tdb_reopen: int (struct tdb_context *) tdb_reopen_all: int (int) tdb_repack: int (struct tdb_context *) tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_runtime_check_for_robust_mutexes: bool (void) tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *) tdb_set_max_dead: void (struct tdb_context *, int) tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *) tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int) tdb_summary: char *(struct tdb_context *) tdb_transaction_cancel: int (struct tdb_context *) tdb_transaction_commit: int (struct tdb_context *) tdb_transaction_prepare_commit: int (struct tdb_context *) tdb_transaction_start: int (struct tdb_context *) tdb_transaction_start_nonblock: int (struct tdb_context *) tdb_transaction_write_lock_mark: int (struct tdb_context *) tdb_transaction_write_lock_unmark: int (struct tdb_context *) tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *) tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *) tdb_unlock: int (struct tdb_context *, int, int) tdb_unlockall: int (struct tdb_context *) tdb_unlockall_read: int (struct tdb_context *) 
tdb_validate_freelist: int (struct tdb_context *, int *) tdb_wipe_all: int (struct tdb_context *) tdb-1.4.2/ABI/tdb-1.3.8.sigs0000660000000000000000000000723212617125445014736 0ustar rootroot00000000000000tdb_add_flags: void (struct tdb_context *, unsigned int) tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA) tdb_chainlock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA) tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA) tdb_chainunlock: int (struct tdb_context *, TDB_DATA) tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA) tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_close: int (struct tdb_context *) tdb_delete: int (struct tdb_context *, TDB_DATA) tdb_dump_all: void (struct tdb_context *) tdb_enable_seqnum: void (struct tdb_context *) tdb_error: enum TDB_ERROR (struct tdb_context *) tdb_errorstr: const char *(struct tdb_context *) tdb_exists: int (struct tdb_context *, TDB_DATA) tdb_fd: int (struct tdb_context *) tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_firstkey: TDB_DATA (struct tdb_context *) tdb_freelist_size: int (struct tdb_context *) tdb_get_flags: int (struct tdb_context *) tdb_get_logging_private: void *(struct tdb_context *) tdb_get_seqnum: int (struct tdb_context *) tdb_hash_size: int (struct tdb_context *) tdb_increment_seqnum_nonblock: void (struct tdb_context *) tdb_jenkins_hash: unsigned int (TDB_DATA *) tdb_lock_nonblock: int (struct tdb_context *, int, int) tdb_lockall: int (struct tdb_context *) tdb_lockall_mark: int (struct tdb_context *) tdb_lockall_nonblock: int (struct tdb_context *) tdb_lockall_read: int (struct tdb_context *) tdb_lockall_read_nonblock: int (struct tdb_context *) tdb_lockall_unmark: int (struct tdb_context *) tdb_log_fn: tdb_log_func (struct tdb_context *) tdb_map_size: size_t (struct tdb_context *) tdb_name: const char *(struct tdb_context *) tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_null: dptr = 0xXXXX, dsize = 0 tdb_open: struct tdb_context *(const char *, int, int, int, mode_t) tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func) tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_printfreelist: int (struct tdb_context *) tdb_remove_flags: void (struct tdb_context *, unsigned int) tdb_reopen: int (struct tdb_context *) tdb_reopen_all: int (int) tdb_repack: int (struct tdb_context *) tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_runtime_check_for_robust_mutexes: bool (void) tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *) tdb_set_max_dead: void (struct tdb_context *, int) tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *) tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int) tdb_summary: char *(struct tdb_context *) tdb_transaction_cancel: int (struct tdb_context *) tdb_transaction_commit: int (struct tdb_context *) tdb_transaction_prepare_commit: int (struct tdb_context *) tdb_transaction_start: int (struct tdb_context *) tdb_transaction_start_nonblock: int (struct tdb_context *) tdb_transaction_write_lock_mark: int (struct tdb_context *) tdb_transaction_write_lock_unmark: int (struct 
tdb_context *) tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *) tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *) tdb_unlock: int (struct tdb_context *, int, int) tdb_unlockall: int (struct tdb_context *) tdb_unlockall_read: int (struct tdb_context *) tdb_validate_freelist: int (struct tdb_context *, int *) tdb_wipe_all: int (struct tdb_context *) tdb-1.4.2/ABI/tdb-1.3.9.sigs0000660000000000000000000000723212702766507014744 0ustar rootroot00000000000000tdb_add_flags: void (struct tdb_context *, unsigned int) tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA) tdb_chainlock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA) tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA) tdb_chainunlock: int (struct tdb_context *, TDB_DATA) tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA) tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_close: int (struct tdb_context *) tdb_delete: int (struct tdb_context *, TDB_DATA) tdb_dump_all: void (struct tdb_context *) tdb_enable_seqnum: void (struct tdb_context *) tdb_error: enum TDB_ERROR (struct tdb_context *) tdb_errorstr: const char *(struct tdb_context *) tdb_exists: int (struct tdb_context *, TDB_DATA) tdb_fd: int (struct tdb_context *) tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_firstkey: TDB_DATA (struct tdb_context *) tdb_freelist_size: int (struct tdb_context *) tdb_get_flags: int (struct tdb_context *) tdb_get_logging_private: void *(struct tdb_context *) tdb_get_seqnum: int (struct tdb_context *) tdb_hash_size: int (struct tdb_context *) tdb_increment_seqnum_nonblock: void (struct tdb_context *) tdb_jenkins_hash: unsigned int (TDB_DATA *) tdb_lock_nonblock: int (struct tdb_context *, int, int) tdb_lockall: int (struct tdb_context *) tdb_lockall_mark: int (struct tdb_context *) tdb_lockall_nonblock: int (struct tdb_context *) tdb_lockall_read: int (struct tdb_context *) tdb_lockall_read_nonblock: int (struct tdb_context *) tdb_lockall_unmark: int (struct tdb_context *) tdb_log_fn: tdb_log_func (struct tdb_context *) tdb_map_size: size_t (struct tdb_context *) tdb_name: const char *(struct tdb_context *) tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_null: dptr = 0xXXXX, dsize = 0 tdb_open: struct tdb_context *(const char *, int, int, int, mode_t) tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func) tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_printfreelist: int (struct tdb_context *) tdb_remove_flags: void (struct tdb_context *, unsigned int) tdb_reopen: int (struct tdb_context *) tdb_reopen_all: int (int) tdb_repack: int (struct tdb_context *) tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_runtime_check_for_robust_mutexes: bool (void) tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *) tdb_set_max_dead: void (struct tdb_context *, int) tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *) tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int) tdb_summary: char *(struct tdb_context *) tdb_transaction_cancel: int (struct tdb_context *) tdb_transaction_commit: int (struct 
tdb_context *) tdb_transaction_prepare_commit: int (struct tdb_context *) tdb_transaction_start: int (struct tdb_context *) tdb_transaction_start_nonblock: int (struct tdb_context *) tdb_transaction_write_lock_mark: int (struct tdb_context *) tdb_transaction_write_lock_unmark: int (struct tdb_context *) tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *) tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *) tdb_unlock: int (struct tdb_context *, int, int) tdb_unlockall: int (struct tdb_context *) tdb_unlockall_read: int (struct tdb_context *) tdb_validate_freelist: int (struct tdb_context *, int *) tdb_wipe_all: int (struct tdb_context *) tdb-1.4.2/ABI/tdb-1.4.0.sigs0000660000000000000000000000771313444661756014744 0ustar rootroot00000000000000tdb_add_flags: void (struct tdb_context *, unsigned int) tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA) tdb_chainlock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA) tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA) tdb_chainunlock: int (struct tdb_context *, TDB_DATA) tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA) tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_close: int (struct tdb_context *) tdb_delete: int (struct tdb_context *, TDB_DATA) tdb_dump_all: void (struct tdb_context *) tdb_enable_seqnum: void (struct tdb_context *) tdb_error: enum TDB_ERROR (struct tdb_context *) tdb_errorstr: const char *(struct tdb_context *) tdb_exists: int (struct tdb_context *, TDB_DATA) tdb_fd: int (struct tdb_context *) tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_firstkey: TDB_DATA (struct tdb_context *) tdb_freelist_size: int (struct tdb_context *) tdb_get_flags: int (struct tdb_context *) tdb_get_logging_private: void *(struct tdb_context *) tdb_get_seqnum: int (struct tdb_context *) tdb_hash_size: int (struct tdb_context *) tdb_increment_seqnum_nonblock: void (struct tdb_context *) tdb_jenkins_hash: unsigned int (TDB_DATA *) tdb_lock_nonblock: int (struct tdb_context *, int, int) tdb_lockall: int (struct tdb_context *) tdb_lockall_mark: int (struct tdb_context *) tdb_lockall_nonblock: int (struct tdb_context *) tdb_lockall_read: int (struct tdb_context *) tdb_lockall_read_nonblock: int (struct tdb_context *) tdb_lockall_unmark: int (struct tdb_context *) tdb_log_fn: tdb_log_func (struct tdb_context *) tdb_map_size: size_t (struct tdb_context *) tdb_name: const char *(struct tdb_context *) tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_null: dptr = 0xXXXX, dsize = 0 tdb_open: struct tdb_context *(const char *, int, int, int, mode_t) tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func) tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_printfreelist: int (struct tdb_context *) tdb_remove_flags: void (struct tdb_context *, unsigned int) tdb_reopen: int (struct tdb_context *) tdb_reopen_all: int (int) tdb_repack: int (struct tdb_context *) tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_runtime_check_for_robust_mutexes: bool (void) tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *) tdb_set_max_dead: void 
(struct tdb_context *, int) tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *) tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int) tdb_storev: int (struct tdb_context *, TDB_DATA, const TDB_DATA *, int, int) tdb_summary: char *(struct tdb_context *) tdb_transaction_active: bool (struct tdb_context *) tdb_transaction_cancel: int (struct tdb_context *) tdb_transaction_commit: int (struct tdb_context *) tdb_transaction_prepare_commit: int (struct tdb_context *) tdb_transaction_start: int (struct tdb_context *) tdb_transaction_start_nonblock: int (struct tdb_context *) tdb_transaction_write_lock_mark: int (struct tdb_context *) tdb_transaction_write_lock_unmark: int (struct tdb_context *) tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *) tdb_traverse_chain: int (struct tdb_context *, unsigned int, tdb_traverse_func, void *) tdb_traverse_key_chain: int (struct tdb_context *, TDB_DATA, tdb_traverse_func, void *) tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *) tdb_unlock: int (struct tdb_context *, int, int) tdb_unlockall: int (struct tdb_context *) tdb_unlockall_read: int (struct tdb_context *) tdb_validate_freelist: int (struct tdb_context *, int *) tdb_wipe_all: int (struct tdb_context *) tdb-1.4.2/ABI/tdb-1.4.1.sigs0000660000000000000000000000771313526763114014735 0ustar rootroot00000000000000tdb_add_flags: void (struct tdb_context *, unsigned int) tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA) tdb_chainlock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA) tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA) tdb_chainunlock: int (struct tdb_context *, TDB_DATA) tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA) tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_close: int (struct tdb_context *) tdb_delete: int (struct tdb_context *, TDB_DATA) tdb_dump_all: void (struct tdb_context *) tdb_enable_seqnum: void (struct tdb_context *) tdb_error: enum TDB_ERROR (struct tdb_context *) tdb_errorstr: const char *(struct tdb_context *) tdb_exists: int (struct tdb_context *, TDB_DATA) tdb_fd: int (struct tdb_context *) tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_firstkey: TDB_DATA (struct tdb_context *) tdb_freelist_size: int (struct tdb_context *) tdb_get_flags: int (struct tdb_context *) tdb_get_logging_private: void *(struct tdb_context *) tdb_get_seqnum: int (struct tdb_context *) tdb_hash_size: int (struct tdb_context *) tdb_increment_seqnum_nonblock: void (struct tdb_context *) tdb_jenkins_hash: unsigned int (TDB_DATA *) tdb_lock_nonblock: int (struct tdb_context *, int, int) tdb_lockall: int (struct tdb_context *) tdb_lockall_mark: int (struct tdb_context *) tdb_lockall_nonblock: int (struct tdb_context *) tdb_lockall_read: int (struct tdb_context *) tdb_lockall_read_nonblock: int (struct tdb_context *) tdb_lockall_unmark: int (struct tdb_context *) tdb_log_fn: tdb_log_func (struct tdb_context *) tdb_map_size: size_t (struct tdb_context *) tdb_name: const char *(struct tdb_context *) tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_null: dptr = 0xXXXX, dsize = 0 tdb_open: struct tdb_context *(const char *, int, int, int, mode_t) tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct 
tdb_logging_context *, tdb_hash_func) tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_printfreelist: int (struct tdb_context *) tdb_remove_flags: void (struct tdb_context *, unsigned int) tdb_reopen: int (struct tdb_context *) tdb_reopen_all: int (int) tdb_repack: int (struct tdb_context *) tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_runtime_check_for_robust_mutexes: bool (void) tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *) tdb_set_max_dead: void (struct tdb_context *, int) tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *) tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int) tdb_storev: int (struct tdb_context *, TDB_DATA, const TDB_DATA *, int, int) tdb_summary: char *(struct tdb_context *) tdb_transaction_active: bool (struct tdb_context *) tdb_transaction_cancel: int (struct tdb_context *) tdb_transaction_commit: int (struct tdb_context *) tdb_transaction_prepare_commit: int (struct tdb_context *) tdb_transaction_start: int (struct tdb_context *) tdb_transaction_start_nonblock: int (struct tdb_context *) tdb_transaction_write_lock_mark: int (struct tdb_context *) tdb_transaction_write_lock_unmark: int (struct tdb_context *) tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *) tdb_traverse_chain: int (struct tdb_context *, unsigned int, tdb_traverse_func, void *) tdb_traverse_key_chain: int (struct tdb_context *, TDB_DATA, tdb_traverse_func, void *) tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *) tdb_unlock: int (struct tdb_context *, int, int) tdb_unlockall: int (struct tdb_context *) tdb_unlockall_read: int (struct tdb_context *) tdb_validate_freelist: int (struct tdb_context *, int *) tdb_wipe_all: int (struct tdb_context *) tdb-1.4.2/ABI/tdb-1.4.2.sigs0000660000000000000000000000771313527011454014730 0ustar rootroot00000000000000tdb_add_flags: void (struct tdb_context *, unsigned int) tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA) tdb_chainlock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA) tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read: int (struct tdb_context *, TDB_DATA) tdb_chainlock_read_nonblock: int (struct tdb_context *, TDB_DATA) tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA) tdb_chainunlock: int (struct tdb_context *, TDB_DATA) tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA) tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_close: int (struct tdb_context *) tdb_delete: int (struct tdb_context *, TDB_DATA) tdb_dump_all: void (struct tdb_context *) tdb_enable_seqnum: void (struct tdb_context *) tdb_error: enum TDB_ERROR (struct tdb_context *) tdb_errorstr: const char *(struct tdb_context *) tdb_exists: int (struct tdb_context *, TDB_DATA) tdb_fd: int (struct tdb_context *) tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_firstkey: TDB_DATA (struct tdb_context *) tdb_freelist_size: int (struct tdb_context *) tdb_get_flags: int (struct tdb_context *) tdb_get_logging_private: void *(struct tdb_context *) tdb_get_seqnum: int (struct tdb_context *) tdb_hash_size: int (struct tdb_context *) tdb_increment_seqnum_nonblock: void (struct tdb_context *) tdb_jenkins_hash: unsigned int (TDB_DATA *) tdb_lock_nonblock: int (struct tdb_context *, int, int) tdb_lockall: int (struct tdb_context *) tdb_lockall_mark: int 
(struct tdb_context *) tdb_lockall_nonblock: int (struct tdb_context *) tdb_lockall_read: int (struct tdb_context *) tdb_lockall_read_nonblock: int (struct tdb_context *) tdb_lockall_unmark: int (struct tdb_context *) tdb_log_fn: tdb_log_func (struct tdb_context *) tdb_map_size: size_t (struct tdb_context *) tdb_name: const char *(struct tdb_context *) tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA) tdb_null: dptr = 0xXXXX, dsize = 0 tdb_open: struct tdb_context *(const char *, int, int, int, mode_t) tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func) tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_printfreelist: int (struct tdb_context *) tdb_remove_flags: void (struct tdb_context *, unsigned int) tdb_reopen: int (struct tdb_context *) tdb_reopen_all: int (int) tdb_repack: int (struct tdb_context *) tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *) tdb_runtime_check_for_robust_mutexes: bool (void) tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *) tdb_set_max_dead: void (struct tdb_context *, int) tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *) tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int) tdb_storev: int (struct tdb_context *, TDB_DATA, const TDB_DATA *, int, int) tdb_summary: char *(struct tdb_context *) tdb_transaction_active: bool (struct tdb_context *) tdb_transaction_cancel: int (struct tdb_context *) tdb_transaction_commit: int (struct tdb_context *) tdb_transaction_prepare_commit: int (struct tdb_context *) tdb_transaction_start: int (struct tdb_context *) tdb_transaction_start_nonblock: int (struct tdb_context *) tdb_transaction_write_lock_mark: int (struct tdb_context *) tdb_transaction_write_lock_unmark: int (struct tdb_context *) tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *) tdb_traverse_chain: int (struct tdb_context *, unsigned int, tdb_traverse_func, void *) tdb_traverse_key_chain: int (struct tdb_context *, TDB_DATA, tdb_traverse_func, void *) tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *) tdb_unlock: int (struct tdb_context *, int, int) tdb_unlockall: int (struct tdb_context *) tdb_unlockall_read: int (struct tdb_context *) tdb_validate_freelist: int (struct tdb_context *, int *) tdb_wipe_all: int (struct tdb_context *) tdb-1.4.2/Makefile0000660000000000000000000000166413444661620013700 0ustar rootroot00000000000000# simple makefile wrapper to run waf WAF_BIN=`PATH=buildtools/bin:../../buildtools/bin:$$PATH which waf` WAF_BINARY=$(PYTHON) $(WAF_BIN) WAF=PYTHONHASHSEED=1 WAF_MAKE=1 $(WAF_BINARY) all: $(WAF) build install: $(WAF) install uninstall: $(WAF) uninstall test: FORCE $(WAF) test $(TEST_OPTIONS) testenv: $(WAF) test --testenv $(TEST_OPTIONS) quicktest: $(WAF) test --quick $(TEST_OPTIONS) dist: touch .tmplock WAFLOCK=.tmplock $(WAF) dist distcheck: touch .tmplock WAFLOCK=.tmplock $(WAF) distcheck clean: $(WAF) clean distclean: $(WAF) distclean reconfigure: configure $(WAF) reconfigure show_waf_options: $(WAF) --help # some compatibility make targets everything: all testsuite: all check: test torture: all # this should do an install as well, once install is finished installcheck: test etags: $(WAF) etags ctags: $(WAF) ctags pydoctor: $(WAF) pydoctor bin/%:: FORCE $(WAF) --targets=`basename $@` FORCE: tdb-1.4.2/_tdb_text.py0000660000000000000000000000642113444661620014562 
0ustar rootroot00000000000000# Text wrapper for tdb bindings # # Copyright (C) 2015 Petr Viktorin # Published under the GNU LGPLv3 or later import sys import tdb class TdbTextWrapper(object): """Text interface for a TDB file""" def __init__(self, tdb): self._tdb = tdb @property def raw(self): return self._tdb def get(self, key): key = key.encode('utf-8') result = self._tdb.get(key) if result is not None: return result.decode('utf-8') def append(self, key, value): key = key.encode('utf-8') value = value.encode('utf-8') self._tdb.append(key, value) def firstkey(self): result = self._tdb.firstkey() if result: return result.decode('utf-8') def nextkey(self, key): key = key.encode('utf-8') result = self._tdb.nextkey(key) if result is not None: return result.decode('utf-8') def delete(self, key): key = key.encode('utf-8') self._tdb.delete(key) def store(self, key, value): key = key.encode('utf-8') value = value.encode('utf-8') self._tdb.store(key, value) def __iter__(self): for key in iter(self._tdb): yield key.decode('utf-8') def __getitem__(self, key): key = key.encode('utf-8') result = self._tdb[key] return result.decode('utf-8') def __contains__(self, key): key = key.encode('utf-8') return key in self._tdb def __repr__(self): return '' % self._tdb def __setitem__(self, key, value): key = key.encode('utf-8') value = value.encode('utf-8') self._tdb[key] = value def __delitem__(self, key): key = key.encode('utf-8') del self._tdb[key] if sys.version_info > (3, 0): keys = __iter__ else: iterkeys = __iter__ has_key = __contains__ ## Add wrappers for functions and getters that don't deal with text def _add_wrapper(name): orig = getattr(tdb.Tdb, name) def wrapper(self, *args, **kwargs): return orig(self._tdb, *args, **kwargs) wrapper.__name__ = orig.__name__ wrapper.__doc__ = orig.__doc__ setattr(TdbTextWrapper, name, wrapper) for name in ("transaction_cancel", "transaction_commit", "transaction_prepare_commit", "transaction_start", "reopen", "lock_all", "unlock_all", "read_lock_all", "read_unlock_all", "close", "add_flags", "remove_flags", "clear", "repack", "enable_seqnum", "increment_seqnum_nonblock", ): _add_wrapper(name) def _add_getter(name): orig = getattr(tdb.Tdb, name) doc = orig.__doc__ def getter(self): return getattr(self._tdb, name) def setter(self, value): return setattr(self._tdb, name, value) setattr(TdbTextWrapper, name, property(getter, setter, doc=doc)) for name in ("hash_size", "map_size", "freelist_size", "flags", "max_dead", "filename", "seqnum", "text", ): _add_getter(name) tdb-1.4.2/common/check.c0000660000000000000000000003150013527011454014735 0ustar rootroot00000000000000 /* Unix SMB/CIFS implementation. trivial database library Copyright (C) Rusty Russell 2009 ** NOTE! The following LGPL license applies to the tdb ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #include "tdb_private.h" /* Since we opened it, these shouldn't fail unless it's recent corruption. */ static bool tdb_check_header(struct tdb_context *tdb, tdb_off_t *recovery) { struct tdb_header hdr; uint32_t h1, h2; if (tdb->methods->tdb_read(tdb, 0, &hdr, sizeof(hdr), 0) == -1) return false; if (strcmp(hdr.magic_food, TDB_MAGIC_FOOD) != 0) goto corrupt; CONVERT(hdr); if (hdr.version != TDB_VERSION) goto corrupt; if (hdr.rwlocks != 0 && hdr.rwlocks != TDB_FEATURE_FLAG_MAGIC && hdr.rwlocks != TDB_HASH_RWLOCK_MAGIC) goto corrupt; tdb_header_hash(tdb, &h1, &h2); if (hdr.magic1_hash && hdr.magic2_hash && (hdr.magic1_hash != h1 || hdr.magic2_hash != h2)) goto corrupt; if (hdr.hash_size == 0) goto corrupt; if (hdr.hash_size != tdb->hash_size) goto corrupt; if (hdr.recovery_start != 0 && hdr.recovery_start < TDB_DATA_START(tdb->hash_size)) goto corrupt; *recovery = hdr.recovery_start; return true; corrupt: tdb->ecode = TDB_ERR_CORRUPT; TDB_LOG((tdb, TDB_DEBUG_ERROR, "Header is corrupt\n")); return false; } /* Generic record header check. */ static bool tdb_check_record(struct tdb_context *tdb, tdb_off_t off, const struct tdb_record *rec) { tdb_off_t tailer; /* Check rec->next: 0 or points to record offset, aligned. */ if (rec->next > 0 && rec->next < TDB_DATA_START(tdb->hash_size)){ TDB_LOG((tdb, TDB_DEBUG_ERROR, "Record offset %u too small next %u\n", off, rec->next)); goto corrupt; } if (rec->next + sizeof(*rec) < rec->next) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "Record offset %u too large next %u\n", off, rec->next)); goto corrupt; } if ((rec->next % TDB_ALIGNMENT) != 0) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "Record offset %u misaligned next %u\n", off, rec->next)); goto corrupt; } if (tdb_oob(tdb, rec->next, sizeof(*rec), 0)) goto corrupt; /* Check rec_len: similar to rec->next, implies next record. */ if ((rec->rec_len % TDB_ALIGNMENT) != 0) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "Record offset %u misaligned length %u\n", off, rec->rec_len)); goto corrupt; } /* Must fit tailer. */ if (rec->rec_len < sizeof(tailer)) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "Record offset %u too short length %u\n", off, rec->rec_len)); goto corrupt; } /* OOB allows "right at the end" access, so this works for last rec. */ if (tdb_oob(tdb, off, sizeof(*rec)+rec->rec_len, 0)) goto corrupt; /* Check tailer. */ if (tdb_ofs_read(tdb, off+sizeof(*rec)+rec->rec_len-sizeof(tailer), &tailer) == -1) goto corrupt; if (tailer != sizeof(*rec) + rec->rec_len) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "Record offset %u invalid tailer\n", off)); goto corrupt; } return true; corrupt: tdb->ecode = TDB_ERR_CORRUPT; return false; } /* Grab some bytes: may copy if can't use mmap. Caller has already done bounds check. */ static TDB_DATA get_bytes(struct tdb_context *tdb, tdb_off_t off, tdb_len_t len) { TDB_DATA d; d.dsize = len; if (tdb->transaction == NULL && tdb->map_ptr != NULL) d.dptr = (unsigned char *)tdb->map_ptr + off; else d.dptr = tdb_alloc_read(tdb, off, d.dsize); return d; } /* Frees data if we're not able to simply use mmap. */ static void put_bytes(struct tdb_context *tdb, TDB_DATA d) { if (tdb->transaction == NULL && tdb->map_ptr != NULL) return; free(d.dptr); } /* We use the excellent Jenkins lookup3 hash; this is based on hash_word2. 
* See: http://burtleburtle.net/bob/c/lookup3.c */ #define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k)))) static void hash(uint32_t key, uint32_t *pc, uint32_t *pb) { uint32_t a,b,c; /* Set up the internal state */ a = b = c = 0xdeadbeef + *pc; c += *pb; a += key; c ^= b; c -= rot(b,14); a ^= c; a -= rot(c,11); b ^= a; b -= rot(a,25); c ^= b; c -= rot(b,16); a ^= c; a -= rot(c,4); b ^= a; b -= rot(a,14); c ^= b; c -= rot(b,24); *pc=c; *pb=b; } /* We want to check that all free records are in the free list (only once), and all free list entries are free records. Similarly for each hash chain of used records. Doing that naively (without walking hash chains, since we want to be linear) means keeping a list of records which have been seen in each hash chain, and another of records pointed to (ie. next pointers from records and the initial hash chain heads). These two lists should be equal. This will take 8 bytes per record, and require sorting at the end. So instead, we record each offset in a bitmap in such a way that recording it twice will cancel out. Since each offset should appear exactly twice, the bitmap should be zero at the end. The approach was inspired by Bloom Filters (see Wikipedia). For each value, we flip K bits in a bitmap of size N. The number of distinct arrangements is: N! / (K! * (N-K)!) Of course, not all arrangements are actually distinct, but testing shows this formula to be close enough. So, if K == 8 and N == 256, the probability of two things flipping the same bits is 1 in 409,663,695,276,000. Given that ldb uses a hash size of 10000, using 32 bytes per hash chain (320k) seems reasonable. */ #define NUM_HASHES 8 #define BITMAP_BITS 256 static void bit_flip(unsigned char bits[], unsigned int idx) { bits[idx / CHAR_BIT] ^= (1 << (idx % CHAR_BIT)); } /* We record offsets in a bitmap for the particular chain it should be in. */ static void record_offset(unsigned char bits[], tdb_off_t off) { uint32_t h1 = off, h2 = 0; unsigned int i; /* We get two good hash values out of jhash2, so we use both. Then * we keep going to produce further hash values. */ for (i = 0; i < NUM_HASHES / 2; i++) { hash(off, &h1, &h2); bit_flip(bits, h1 % BITMAP_BITS); bit_flip(bits, h2 % BITMAP_BITS); h2++; } } /* Check that an in-use record is valid. */ static bool tdb_check_used_record(struct tdb_context *tdb, tdb_off_t off, const struct tdb_record *rec, unsigned char **hashes, int (*check)(TDB_DATA, TDB_DATA, void *), void *private_data) { TDB_DATA key, data; tdb_len_t len; if (!tdb_check_record(tdb, off, rec)) return false; /* key + data + tailer must fit in record */ len = rec->key_len; len += rec->data_len; if (len < rec->data_len) { /* overflow */ TDB_LOG((tdb, TDB_DEBUG_ERROR, "Record lengths overflow\n")); return false; } len += sizeof(tdb_off_t); if (len < sizeof(tdb_off_t)) { /* overflow */ TDB_LOG((tdb, TDB_DEBUG_ERROR, "Record lengths overflow\n")); return false; } if (len > rec->rec_len) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "Record offset %u too short for contents\n", off)); return false; } key = get_bytes(tdb, off + sizeof(*rec), rec->key_len); if (!key.dptr) return false; if (tdb->hash_fn(&key) != rec->full_hash) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "Record offset %u has incorrect hash\n", off)); goto fail_put_key; } /* Mark this offset as a known value for this hash bucket. */ record_offset(hashes[BUCKET(rec->full_hash)+1], off); /* And similarly if the next pointer is valid.
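 * (Illustrative aside, not part of tdb: the cancellation trick above works
 * because bit_flip() uses XOR, which is its own inverse. A minimal sketch:
 *
 *   unsigned char bits[BITMAP_BITS / CHAR_BIT] = {0};
 *   record_offset(bits, 0x1234);   - flips NUM_HASHES bit positions
 *   record_offset(bits, 0x1234);   - flips the same positions again
 *
 * afterwards bits[] is all zero: any offset seen an even number of times
 * cancels out, so a nonzero byte left at the end of tdb_check() means some
 * offset appeared an odd number of times in the chains. The next pointer
 * below gets recorded for the same reason.)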
*/ if (rec->next) record_offset(hashes[BUCKET(rec->full_hash)+1], rec->next); /* If they supply a check function and this record isn't dead, get data and feed it. */ if (check && rec->magic != TDB_DEAD_MAGIC) { data = get_bytes(tdb, off + sizeof(*rec) + rec->key_len, rec->data_len); if (!data.dptr) goto fail_put_key; if (check(key, data, private_data) == -1) goto fail_put_data; put_bytes(tdb, data); } put_bytes(tdb, key); return true; fail_put_data: put_bytes(tdb, data); fail_put_key: put_bytes(tdb, key); return false; } /* Check that an unused record is valid. */ static bool tdb_check_free_record(struct tdb_context *tdb, tdb_off_t off, const struct tdb_record *rec, unsigned char **hashes) { if (!tdb_check_record(tdb, off, rec)) return false; /* Mark this offset as a known value for the free list. */ record_offset(hashes[0], off); /* And similarly if the next pointer is valid. */ if (rec->next) record_offset(hashes[0], rec->next); return true; } /* Slow, but should be very rare. */ size_t tdb_dead_space(struct tdb_context *tdb, tdb_off_t off) { size_t len; for (len = 0; off + len < tdb->map_size; len++) { char c; if (tdb->methods->tdb_read(tdb, off, &c, 1, 0)) return 0; if (c != 0 && c != 0x42) break; } return len; } _PUBLIC_ int tdb_check(struct tdb_context *tdb, int (*check)(TDB_DATA key, TDB_DATA data, void *private_data), void *private_data) { unsigned int h; unsigned char **hashes; tdb_off_t off, recovery_start; struct tdb_record rec; bool found_recovery = false; tdb_len_t dead; bool locked; /* Read-only databases use no locking at all: it's best-effort. * We may have a write lock already, so skip that case too. */ if (tdb->read_only || tdb->allrecord_lock.count != 0) { locked = false; } else { if (tdb_lockall_read(tdb) == -1) return -1; locked = true; } /* Make sure we know true size of the underlying file. */ tdb_oob(tdb, tdb->map_size, 1, 1); /* Header must be OK: also gets us the recovery ptr, if any. */ if (!tdb_check_header(tdb, &recovery_start)) goto unlock; /* We should have the whole header, too. */ if (tdb->map_size < TDB_DATA_START(tdb->hash_size)) { tdb->ecode = TDB_ERR_CORRUPT; TDB_LOG((tdb, TDB_DEBUG_ERROR, "File too short for hashes\n")); goto unlock; } /* One big malloc: pointers then bit arrays. */ hashes = (unsigned char **)calloc( 1, sizeof(hashes[0]) * (1+tdb->hash_size) + BITMAP_BITS / CHAR_BIT * (1+tdb->hash_size)); if (!hashes) { tdb->ecode = TDB_ERR_OOM; goto unlock; } /* Initialize pointers */ hashes[0] = (unsigned char *)(&hashes[1+tdb->hash_size]); for (h = 1; h < 1+tdb->hash_size; h++) hashes[h] = hashes[h-1] + BITMAP_BITS / CHAR_BIT; /* Freelist and hash headers are all in a row: read them. */ for (h = 0; h < 1+tdb->hash_size; h++) { if (tdb_ofs_read(tdb, FREELIST_TOP + h*sizeof(tdb_off_t), &off) == -1) goto free; if (off) record_offset(hashes[h], off); } /* For each record, read it in and check it's ok. */ for (off = TDB_DATA_START(tdb->hash_size); off < tdb->map_size; off += sizeof(rec) + rec.rec_len) { if (tdb->methods->tdb_read(tdb, off, &rec, sizeof(rec), DOCONV()) == -1) goto free; switch (rec.magic) { case TDB_MAGIC: case TDB_DEAD_MAGIC: if (!tdb_check_used_record(tdb, off, &rec, hashes, check, private_data)) goto free; break; case TDB_FREE_MAGIC: if (!tdb_check_free_record(tdb, off, &rec, hashes)) goto free; break; /* If we crash after ftruncate, we can get zeroes or fill. 
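 * (Aside: 0x42424242 below is four 'B' fill bytes, the same pattern that
 * tdb_dead_space() above accepts alongside zeroes. For reference, a
 * hypothetical caller of tdb_check() supplies a callback of type
 * int (*)(TDB_DATA, TDB_DATA, void *); returning -1 from it fails the check:
 *
 *   static int count_rec(TDB_DATA key, TDB_DATA data, void *priv)
 *   { (*(unsigned int *)priv)++; return 0; }
 *
 *   unsigned int n = 0;
 *   if (tdb_check(tdb, count_rec, &n) == 0)
 *           printf("database ok, %u records\n", n);
 *
 * The callback name here is illustrative only.)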
*/ case TDB_RECOVERY_INVALID_MAGIC: case 0x42424242: if (recovery_start == off) { found_recovery = true; break; } dead = tdb_dead_space(tdb, off); if (dead < sizeof(rec)) goto corrupt; TDB_LOG((tdb, TDB_DEBUG_ERROR, "Dead space at %u-%u (of %u)\n", off, off + dead, tdb->map_size)); rec.rec_len = dead - sizeof(rec); break; case TDB_RECOVERY_MAGIC: if (recovery_start != off) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "Unexpected recovery record at offset %u\n", off)); goto free; } found_recovery = true; break; default: ; corrupt: tdb->ecode = TDB_ERR_CORRUPT; TDB_LOG((tdb, TDB_DEBUG_ERROR, "Bad magic 0x%x at offset %u\n", rec.magic, off)); goto free; } } /* Now, hashes should all be empty: each record exists and is referred * to by one other. */ for (h = 0; h < 1+tdb->hash_size; h++) { unsigned int i; for (i = 0; i < BITMAP_BITS / CHAR_BIT; i++) { if (hashes[h][i] != 0) { tdb->ecode = TDB_ERR_CORRUPT; TDB_LOG((tdb, TDB_DEBUG_ERROR, "Hashes do not match records\n")); goto free; } } } /* We must have found recovery area if there was one. */ if (recovery_start != 0 && !found_recovery) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "Expected a recovery area at %u\n", recovery_start)); goto free; } free(hashes); if (locked) { tdb_unlockall_read(tdb); } return 0; free: free(hashes); unlock: if (locked) { tdb_unlockall_read(tdb); } return -1; } tdb-1.4.2/common/dump.c0000660000000000000000000000755213444661620014643 0ustar rootroot00000000000000 /* Unix SMB/CIFS implementation. trivial database library Copyright (C) Andrew Tridgell 1999-2005 Copyright (C) Paul `Rusty' Russell 2000 Copyright (C) Jeremy Allison 2000-2003 ** NOTE! The following LGPL license applies to the tdb ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "tdb_private.h" static tdb_off_t tdb_dump_record(struct tdb_context *tdb, int hash, tdb_off_t offset) { struct tdb_record rec; tdb_off_t tailer_ofs, tailer; if (tdb->methods->tdb_read(tdb, offset, (char *)&rec, sizeof(rec), DOCONV()) == -1) { printf("ERROR: failed to read record at %u\n", offset); return 0; } printf(" rec: hash=%d offset=0x%08x next=0x%08x rec_len=%u " "key_len=%u data_len=%u full_hash=0x%08x magic=0x%08x\n", hash, offset, rec.next, rec.rec_len, rec.key_len, rec.data_len, rec.full_hash, rec.magic); tailer_ofs = offset + sizeof(rec) + rec.rec_len - sizeof(tdb_off_t); if (tdb_ofs_read(tdb, tailer_ofs, &tailer) == -1) { printf("ERROR: failed to read tailer at %u\n", tailer_ofs); return rec.next; } if (tailer != rec.rec_len + sizeof(rec)) { printf("ERROR: tailer does not match record! 
tailer=%u totalsize=%u\n", (unsigned int)tailer, (unsigned int)(rec.rec_len + sizeof(rec))); } return rec.next; } static int tdb_dump_chain(struct tdb_context *tdb, int i) { struct tdb_chainwalk_ctx chainwalk; tdb_off_t rec_ptr, top; if (i == -1) { top = FREELIST_TOP; } else { top = TDB_HASH_TOP(i); } if (tdb_lock(tdb, i, F_WRLCK) != 0) return -1; if (tdb_ofs_read(tdb, top, &rec_ptr) == -1) return tdb_unlock(tdb, i, F_WRLCK); tdb_chainwalk_init(&chainwalk, rec_ptr); if (rec_ptr) printf("hash=%d\n", i); while (rec_ptr) { bool ok; rec_ptr = tdb_dump_record(tdb, i, rec_ptr); ok = tdb_chainwalk_check(tdb, &chainwalk, rec_ptr); if (!ok) { printf("circular hash chain %d\n", i); break; } } return tdb_unlock(tdb, i, F_WRLCK); } _PUBLIC_ void tdb_dump_all(struct tdb_context *tdb) { uint32_t i; for (i=0;ihash_size;i++) { tdb_dump_chain(tdb, i); } printf("freelist:\n"); tdb_dump_chain(tdb, -1); } _PUBLIC_ int tdb_printfreelist(struct tdb_context *tdb) { int ret; long total_free = 0; tdb_off_t offset, rec_ptr; struct tdb_record rec; if ((ret = tdb_lock(tdb, -1, F_WRLCK)) != 0) return ret; offset = FREELIST_TOP; /* read in the freelist top */ if (tdb_ofs_read(tdb, offset, &rec_ptr) == -1) { tdb_unlock(tdb, -1, F_WRLCK); return 0; } printf("freelist top=[0x%08x]\n", rec_ptr ); while (rec_ptr) { if (tdb->methods->tdb_read(tdb, rec_ptr, (char *)&rec, sizeof(rec), DOCONV()) == -1) { tdb_unlock(tdb, -1, F_WRLCK); return -1; } if (rec.magic != TDB_FREE_MAGIC) { printf("bad magic 0x%08x in free list\n", rec.magic); tdb_unlock(tdb, -1, F_WRLCK); return -1; } printf("entry offset=[0x%08x], rec.rec_len = [0x%08x (%u)] (end = 0x%08x)\n", rec_ptr, rec.rec_len, rec.rec_len, rec_ptr + rec.rec_len); total_free += rec.rec_len; /* move to the next record */ rec_ptr = rec.next; } printf("total rec_len = [0x%08lx (%lu)]\n", total_free, total_free); return tdb_unlock(tdb, -1, F_WRLCK); } tdb-1.4.2/common/error.c0000660000000000000000000000360512406075657015030 0ustar rootroot00000000000000 /* Unix SMB/CIFS implementation. trivial database library Copyright (C) Andrew Tridgell 1999-2005 Copyright (C) Paul `Rusty' Russell 2000 Copyright (C) Jeremy Allison 2000-2003 ** NOTE! The following LGPL license applies to the tdb ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #include "tdb_private.h" _PUBLIC_ enum TDB_ERROR tdb_error(struct tdb_context *tdb) { return tdb->ecode; } static struct tdb_errname { enum TDB_ERROR ecode; const char *estring; } emap[] = { {TDB_SUCCESS, "Success"}, {TDB_ERR_CORRUPT, "Corrupt database"}, {TDB_ERR_IO, "IO Error"}, {TDB_ERR_LOCK, "Locking error"}, {TDB_ERR_OOM, "Out of memory"}, {TDB_ERR_EXISTS, "Record exists"}, {TDB_ERR_NOLOCK, "Lock exists on other keys"}, {TDB_ERR_EINVAL, "Invalid parameter"}, {TDB_ERR_NOEXIST, "Record does not exist"}, {TDB_ERR_RDONLY, "write not permitted"} }; /* Error string for the last tdb error */ _PUBLIC_ const char *tdb_errorstr(struct tdb_context *tdb) { uint32_t i; for (i = 0; i < sizeof(emap) / sizeof(struct tdb_errname); i++) if (tdb->ecode == emap[i].ecode) return emap[i].estring; return "Invalid error code"; } tdb-1.4.2/common/freelist.c0000660000000000000000000004257113527011454015507 0ustar rootroot00000000000000 /* Unix SMB/CIFS implementation. trivial database library Copyright (C) Andrew Tridgell 1999-2005 Copyright (C) Paul `Rusty' Russell 2000 Copyright (C) Jeremy Allison 2000-2003 ** NOTE! The following LGPL license applies to the tdb ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "tdb_private.h" /* read a freelist record and check for simple errors */ int tdb_rec_free_read(struct tdb_context *tdb, tdb_off_t off, struct tdb_record *rec) { if (tdb->methods->tdb_read(tdb, off, rec, sizeof(*rec),DOCONV()) == -1) return -1; if (rec->magic == TDB_MAGIC) { /* this happens when a app is showdown while deleting a record - we should not completely fail when this happens */ TDB_LOG((tdb, TDB_DEBUG_WARNING, "tdb_rec_free_read non-free magic 0x%x at offset=%u - fixing\n", rec->magic, off)); rec->magic = TDB_FREE_MAGIC; if (tdb_rec_write(tdb, off, rec) == -1) return -1; } if (rec->magic != TDB_FREE_MAGIC) { /* Ensure ecode is set for log fn. */ tdb->ecode = TDB_ERR_CORRUPT; TDB_LOG((tdb, TDB_DEBUG_WARNING, "tdb_rec_free_read bad magic 0x%x at offset=%u\n", rec->magic, off)); return -1; } if (tdb_oob(tdb, rec->next, sizeof(*rec), 0) != 0) return -1; return 0; } /* update a record tailer (must hold allocation lock) */ static int update_tailer(struct tdb_context *tdb, tdb_off_t offset, const struct tdb_record *rec) { tdb_off_t totalsize; /* Offset of tailer from record header */ totalsize = sizeof(*rec) + rec->rec_len; return tdb_ofs_write(tdb, offset + totalsize - sizeof(tdb_off_t), &totalsize); } /** * Read the record directly on the left. * Fail if there is no record on the left. 
*/ static int read_record_on_left(struct tdb_context *tdb, tdb_off_t rec_ptr, tdb_off_t *left_p, struct tdb_record *left_r) { tdb_off_t left_ptr; tdb_off_t left_size; struct tdb_record left_rec; int ret; left_ptr = rec_ptr - sizeof(tdb_off_t); if (left_ptr <= TDB_DATA_START(tdb->hash_size)) { /* no record on the left */ return -1; } /* Read in tailer and jump back to header */ ret = tdb_ofs_read(tdb, left_ptr, &left_size); if (ret == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: left offset read failed at %u\n", left_ptr)); return -1; } /* it could be uninitialised data */ if (left_size == 0 || left_size == TDB_PAD_U32) { return -1; } if (left_size > rec_ptr) { return -1; } left_ptr = rec_ptr - left_size; if (left_ptr < TDB_DATA_START(tdb->hash_size)) { return -1; } /* Now read in the left record */ ret = tdb->methods->tdb_read(tdb, left_ptr, &left_rec, sizeof(left_rec), DOCONV()); if (ret == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: left read failed at %u (%u)\n", left_ptr, left_size)); return -1; } *left_p = left_ptr; *left_r = left_rec; return 0; } /** * Merge new freelist record with the direct left neighbour. * This assumes that left_rec represents the record * directly to the left of right_rec and that this is * a freelist record. */ static int merge_with_left_record(struct tdb_context *tdb, tdb_off_t left_ptr, struct tdb_record *left_rec, struct tdb_record *right_rec) { int ret; left_rec->rec_len += sizeof(*right_rec) + right_rec->rec_len; ret = tdb_rec_write(tdb, left_ptr, left_rec); if (ret == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "merge_with_left_record: update_left failed at %u\n", left_ptr)); return -1; } ret = update_tailer(tdb, left_ptr, left_rec); if (ret == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "merge_with_left_record: update_tailer failed at %u\n", left_ptr)); return -1; } return 0; } /** * Check whether the record left of a given freelist record is * also a freelist record, and if so, merge the two records. * * Return code: * -1 upon error * 0 if left was not a free record * 1 if left was free and successfully merged. * * The current record is handed in with pointer and fully read record. * * The left record pointer and struct can be retrieved as result * in lp and lr; */ static int check_merge_with_left_record(struct tdb_context *tdb, tdb_off_t rec_ptr, struct tdb_record *rec, tdb_off_t *lp, struct tdb_record *lr) { tdb_off_t left_ptr; struct tdb_record left_rec; int ret; ret = read_record_on_left(tdb, rec_ptr, &left_ptr, &left_rec); if (ret != 0) { return 0; } if (left_rec.magic != TDB_FREE_MAGIC) { return 0; } /* It's free - expand to include it. */ ret = merge_with_left_record(tdb, left_ptr, &left_rec, rec); if (ret != 0) { return -1; } if (lp != NULL) { *lp = left_ptr; } if (lr != NULL) { *lr = left_rec; } return 1; } /** * Check whether the record left of a given freelist record is * also a freelist record, and if so, merge the two records. * * Return code: * -1 upon error * 0 if left was not a free record * 1 if left was free and successfully merged. * * In this variant, the input record is specified just as the pointer * and is read from the database if needed. * * next_ptr will contain the original record's next pointer after * successful merging (which will be lost after merging), so that * the caller can update the last pointer. 
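 *
 * A worked example of the merge arithmetic (illustrative numbers): a free
 * record at offset 1000 with rec_len 100 sits directly left of a free
 * record at offset 1000 + sizeof(struct tdb_record) + 100. The merge does
 *
 *   left_rec.rec_len += sizeof(struct tdb_record) + right_rec.rec_len;
 *
 * so the right record's header simply becomes payload of the left record,
 * and afterwards only the left header plus the rewritten tailer at the
 * far end describe the combined block.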
*/ static int check_merge_ptr_with_left_record(struct tdb_context *tdb, tdb_off_t rec_ptr, tdb_off_t *next_ptr) { tdb_off_t left_ptr; struct tdb_record rec, left_rec; int ret; ret = read_record_on_left(tdb, rec_ptr, &left_ptr, &left_rec); if (ret != 0) { return 0; } if (left_rec.magic != TDB_FREE_MAGIC) { return 0; } /* It's free - expand to include it. */ ret = tdb->methods->tdb_read(tdb, rec_ptr, &rec, sizeof(rec), DOCONV()); if (ret != 0) { return -1; } ret = merge_with_left_record(tdb, left_ptr, &left_rec, &rec); if (ret != 0) { return -1; } if (next_ptr != NULL) { *next_ptr = rec.next; } return 1; } /** * Add an element into the freelist. * * We merge the new record into the left record if it is also a * free record, but not with the right one. This makes the * operation O(1) instead of O(n): merging with the right record * requires a traverse of the freelist to find the previous * record in the free list. * * This prevents db traverses from being O(n^2) after a lot of deletes. */ int tdb_free(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec) { int ret; /* Allocation and tailer lock */ if (tdb_lock(tdb, -1, F_WRLCK) != 0) return -1; /* set an initial tailer, so if we fail we don't leave a bogus record */ if (update_tailer(tdb, offset, rec) != 0) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: update_tailer failed!\n")); goto fail; } ret = check_merge_with_left_record(tdb, offset, rec, NULL, NULL); if (ret == -1) { goto fail; } if (ret == 1) { /* merged */ goto done; } /* Nothing to merge, prepend to free list */ rec->magic = TDB_FREE_MAGIC; if (tdb_ofs_read(tdb, FREELIST_TOP, &rec->next) == -1 || tdb_rec_write(tdb, offset, rec) == -1 || tdb_ofs_write(tdb, FREELIST_TOP, &offset) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free record write failed at offset=%u\n", offset)); goto fail; } done: /* And we're done. */ tdb_unlock(tdb, -1, F_WRLCK); return 0; fail: tdb_unlock(tdb, -1, F_WRLCK); return -1; } /* the core of tdb_allocate - called when we have decided which free list entry to use Note that we try to allocate by grabbing data from the end of an existing record, not the beginning. This is so the left merge in a free is more likely to be able to free up the record without fragmentation */ static tdb_off_t tdb_allocate_ofs(struct tdb_context *tdb, tdb_len_t length, tdb_off_t rec_ptr, struct tdb_record *rec, tdb_off_t last_ptr) { #define MIN_REC_SIZE (sizeof(struct tdb_record) + sizeof(tdb_off_t) + 8) if (rec->rec_len < length + MIN_REC_SIZE) { /* we have to grab the whole record */ /* unlink it from the previous record */ if (tdb_ofs_write(tdb, last_ptr, &rec->next) == -1) { return 0; } /* mark it not free */ rec->magic = TDB_MAGIC; if (tdb_rec_write(tdb, rec_ptr, rec) == -1) { return 0; } return rec_ptr; } /* we're going to just shorten the existing record */ rec->rec_len -= (length + sizeof(*rec)); if (tdb_rec_write(tdb, rec_ptr, rec) == -1) { return 0; } if (update_tailer(tdb, rec_ptr, rec) == -1) { return 0; } /* and setup the new record */ rec_ptr += sizeof(*rec) + rec->rec_len; memset(rec, '\0', sizeof(*rec)); rec->rec_len = length; rec->magic = TDB_MAGIC; if (tdb_rec_write(tdb, rec_ptr, rec) == -1) { return 0; } if (update_tailer(tdb, rec_ptr, rec) == -1) { return 0; } return rec_ptr; } /* allocate some space from the free list. 
The offset returned points to an unconnected tdb_record within the database with room for at least length bytes of total data. 0 is returned if the space could not be allocated */ static tdb_off_t tdb_allocate_from_freelist( struct tdb_context *tdb, tdb_len_t length, struct tdb_record *rec) { tdb_off_t rec_ptr, last_ptr, newrec_ptr; struct tdb_chainwalk_ctx chainwalk; bool modified; struct { tdb_off_t rec_ptr, last_ptr; tdb_len_t rec_len; } bestfit; float multiplier = 1.0; bool merge_created_candidate; /* over-allocate to reduce fragmentation */ length *= 1.25; /* Extra bytes required for tailer */ length += sizeof(tdb_off_t); length = TDB_ALIGN(length, TDB_ALIGNMENT); again: merge_created_candidate = false; last_ptr = FREELIST_TOP; /* read in the freelist top */ if (tdb_ofs_read(tdb, FREELIST_TOP, &rec_ptr) == -1) return 0; modified = false; tdb_chainwalk_init(&chainwalk, rec_ptr); bestfit.rec_ptr = 0; bestfit.last_ptr = 0; bestfit.rec_len = 0; /* this is a best fit allocation strategy. Originally we used a first fit strategy, but it suffered from massive fragmentation issues when faced with a slowly increasing record size. */ while (rec_ptr) { int ret; tdb_off_t left_ptr; struct tdb_record left_rec; if (tdb_rec_free_read(tdb, rec_ptr, rec) == -1) { return 0; } ret = check_merge_with_left_record(tdb, rec_ptr, rec, &left_ptr, &left_rec); if (ret == -1) { return 0; } if (ret == 1) { /* merged */ rec_ptr = rec->next; ret = tdb_ofs_write(tdb, last_ptr, &rec->next); if (ret == -1) { return 0; } /* * We have merged the current record into the left * neighbour. So our traverse of the freelist will * skip it and consider the next record in the chain. * * But the enlarged left neighbour may be a candidate. * If it is, we can not directly use it, though. * The only thing we can do and have to do here is to * update the current best fit size in the chain if the * current best fit is the left record. (By that we may * worsen the best fit we already had, but this is not a * problem.) * * If the current best fit is not the left record, * all we can do is remember the fact that a merge * created a new candidate so that we can trigger * a second walk of the freelist if at the end of * the first walk we have not found any fit. * This way we can avoid expanding the database. */ if (bestfit.rec_ptr == left_ptr) { bestfit.rec_len = left_rec.rec_len; } if (left_rec.rec_len > length) { merge_created_candidate = true; } modified = true; continue; } if (rec->rec_len >= length) { if (bestfit.rec_ptr == 0 || rec->rec_len < bestfit.rec_len) { bestfit.rec_len = rec->rec_len; bestfit.rec_ptr = rec_ptr; bestfit.last_ptr = last_ptr; } } /* move to the next record */ last_ptr = rec_ptr; rec_ptr = rec->next; if (!modified) { bool ok; ok = tdb_chainwalk_check(tdb, &chainwalk, rec_ptr); if (!ok) { return 0; } } /* if we've found a record that is big enough, then stop searching if it's also not too big. The definition of 'too big' changes as we scan through */ if (bestfit.rec_len > 0 && bestfit.rec_len < length * multiplier) { break; } /* this multiplier means we only extremely rarely search more than 50 or so records. At 50 records we accept records up to 11 times larger than what we want */ multiplier *= 1.05; } if (bestfit.rec_ptr != 0) { if (tdb_rec_free_read(tdb, bestfit.rec_ptr, rec) == -1) { return 0; } newrec_ptr = tdb_allocate_ofs(tdb, length, bestfit.rec_ptr, rec, bestfit.last_ptr); return newrec_ptr; } if (merge_created_candidate) { goto again; } /* we didn't find enough space.
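(Sanity check on the multiplier arithmetic above: the acceptance threshold grows by a factor of 1.05 per record scanned, and 1.05 to the 50th power is roughly 11.5, which is where the "50 or so records" and "11 times larger" figures come from.)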
See if we can expand the database and if we can then try again */ if (tdb_expand(tdb, length + sizeof(*rec)) == 0) goto again; return 0; } static bool tdb_alloc_dead( struct tdb_context *tdb, int hash, tdb_len_t length, tdb_off_t *rec_ptr, struct tdb_record *rec) { tdb_off_t last_ptr; *rec_ptr = tdb_find_dead(tdb, hash, rec, length, &last_ptr); if (*rec_ptr == 0) { return false; } /* * Unlink the record from the hash chain, it's about to be moved into * another one. */ return (tdb_ofs_write(tdb, last_ptr, &rec->next) == 0); } static void tdb_purge_dead(struct tdb_context *tdb, uint32_t hash) { int max_dead_records = tdb->max_dead_records; tdb->max_dead_records = 0; tdb_trim_dead(tdb, hash); tdb->max_dead_records = max_dead_records; } /* * Chain "hash" is assumed to be locked */ tdb_off_t tdb_allocate(struct tdb_context *tdb, int hash, tdb_len_t length, struct tdb_record *rec) { tdb_off_t ret; uint32_t i; if (tdb->max_dead_records == 0) { /* * No dead records to expect anywhere. Do the blocking * freelist lock without trying to steal from others */ goto blocking_freelist_allocate; } /* * The following loop tries to get the freelist lock nonblocking. If * it gets the lock, allocate from there. If the freelist is busy, * instead of waiting we try to steal dead records from other hash * chains. * * Be aware that we do nonblocking locks on the other hash chains as * well and fail gracefully. This way we avoid deadlocks (we block two * hash chains, something which is pretty bad normally) */ for (i=0; i<tdb->hash_size; i++) { int list; list = BUCKET(hash+i); if (tdb_lock_nonblock(tdb, list, F_WRLCK) == 0) { bool got_dead; got_dead = tdb_alloc_dead(tdb, list, length, &ret, rec); tdb_unlock(tdb, list, F_WRLCK); if (got_dead) { return ret; } } if (tdb_lock_nonblock(tdb, -1, F_WRLCK) == 0) { /* * Under the freelist lock take the chance to give * back our dead records. */ tdb_purge_dead(tdb, hash); ret = tdb_allocate_from_freelist(tdb, length, rec); tdb_unlock(tdb, -1, F_WRLCK); return ret; } } blocking_freelist_allocate: if (tdb_lock(tdb, -1, F_WRLCK) == -1) { return 0; } /* * Dead records can happen even if max_dead_records==0, they * are older than the max_dead_records concept: They happen if * tdb_delete happens concurrently with a traverse. */ tdb_purge_dead(tdb, hash); ret = tdb_allocate_from_freelist(tdb, length, rec); tdb_unlock(tdb, -1, F_WRLCK); return ret; } /** * Merge adjacent records in the freelist.
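 *
 * The walk below keeps `cur` pointing at the field holding the current
 * chain pointer (initially FREELIST_TOP itself) and `next` at the record
 * it refers to. When `next` merges into its left neighbour it vanishes
 * from the chain, so `cur` is rewritten to point at the vanished record's
 * successor (`next2`) and the walk resumes from there.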
*/ static int tdb_freelist_merge_adjacent(struct tdb_context *tdb, int *count_records, int *count_merged) { tdb_off_t cur, next; int count = 0; int merged = 0; int ret; ret = tdb_lock(tdb, -1, F_RDLCK); if (ret == -1) { return -1; } cur = FREELIST_TOP; while (tdb_ofs_read(tdb, cur, &next) == 0 && next != 0) { tdb_off_t next2; count++; ret = check_merge_ptr_with_left_record(tdb, next, &next2); if (ret == -1) { goto done; } if (ret == 1) { /* * merged: * now let cur->next point to next2 instead of next */ ret = tdb_ofs_write(tdb, cur, &next2); if (ret != 0) { goto done; } next = next2; merged++; } cur = next; } if (count_records != NULL) { *count_records = count; } if (count_merged != NULL) { *count_merged = merged; } ret = 0; done: tdb_unlock(tdb, -1, F_RDLCK); return ret; } /** * return the size of the freelist - no merging done */ static int tdb_freelist_size_no_merge(struct tdb_context *tdb) { tdb_off_t ptr; int count=0; if (tdb_lock(tdb, -1, F_RDLCK) == -1) { return -1; } ptr = FREELIST_TOP; while (tdb_ofs_read(tdb, ptr, &ptr) == 0 && ptr != 0) { count++; } tdb_unlock(tdb, -1, F_RDLCK); return count; } /** * return the size of the freelist - used to decide if we should repack * * As a side effect, adjacent records are merged unless the * database is read-only, in order to reduce the fragmentation * without repacking. */ _PUBLIC_ int tdb_freelist_size(struct tdb_context *tdb) { int count = 0; if (tdb->read_only) { count = tdb_freelist_size_no_merge(tdb); } else { int ret; ret = tdb_freelist_merge_adjacent(tdb, &count, NULL); if (ret != 0) { return -1; } } return count; } tdb-1.4.2/common/freelistcheck.c0000660000000000000000000000517013527011454016477 0ustar rootroot00000000000000/* Unix SMB/CIFS implementation. trivial database library Copyright (C) Jeremy Allison 2006 ** NOTE! The following LGPL license applies to the tdb ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "tdb_private.h" /* Check the freelist is good and contains no loops. Very memory intensive - only do this as a consistency checker. Heh heh - uses an in memory tdb as the storage for the "seen" record list. For some reason this strikes me as extremely clever as I don't have to write another tree data structure implementation :-). */ static int seen_insert(struct tdb_context *mem_tdb, tdb_off_t rec_ptr) { TDB_DATA key; key.dptr = (unsigned char *)&rec_ptr; key.dsize = sizeof(rec_ptr); return tdb_store(mem_tdb, key, tdb_null, TDB_INSERT); } _PUBLIC_ int tdb_validate_freelist(struct tdb_context *tdb, int *pnum_entries) { struct tdb_context *mem_tdb = NULL; struct tdb_record rec; tdb_off_t rec_ptr, last_ptr; int ret = -1; *pnum_entries = 0; mem_tdb = tdb_open("flval", tdb->hash_size, TDB_INTERNAL, O_RDWR, 0600); if (!mem_tdb) { return -1; } if (tdb_lock(tdb, -1, F_WRLCK) == -1) { tdb_close(mem_tdb); return 0; } last_ptr = FREELIST_TOP; /* Store the FREELIST_TOP record. 
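 * (TDB_INTERNAL above gives a purely in-memory tdb that never touches
 * disk, and TDB_INSERT makes a store fail if the key already exists, so
 * the second sighting of any offset - i.e. a loop - shows up as a
 * failing insert.)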
*/ if (seen_insert(mem_tdb, last_ptr) == -1) { tdb->ecode = TDB_ERR_CORRUPT; ret = -1; goto fail; } /* read in the freelist top */ if (tdb_ofs_read(tdb, FREELIST_TOP, &rec_ptr) == -1) { goto fail; } while (rec_ptr) { /* If we can't store this record (we've seen it before) then the free list has a loop and must be corrupt. */ if (seen_insert(mem_tdb, rec_ptr)) { tdb->ecode = TDB_ERR_CORRUPT; ret = -1; goto fail; } if (tdb_rec_free_read(tdb, rec_ptr, &rec) == -1) { goto fail; } /* move to the next record */ rec_ptr = rec.next; *pnum_entries += 1; } ret = 0; fail: tdb_close(mem_tdb); tdb_unlock(tdb, -1, F_WRLCK); return ret; } tdb-1.4.2/common/hash.c0000660000000000000000000003051113444661620014610 0ustar rootroot00000000000000 /* Unix SMB/CIFS implementation. trivial database library Copyright (C) Rusty Russell 2010 ** NOTE! The following LGPL license applies to the tdb ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "tdb_private.h" /* This is based on the hash algorithm from gdbm */ unsigned int tdb_old_hash(TDB_DATA *key) { uint32_t value; /* Used to compute the hash value. */ uint32_t i; /* Used to cycle through random values. */ /* Set the initial value from the key size. */ for (value = 0x238F13AF * key->dsize, i=0; i < key->dsize; i++) value = (value + (key->dptr[i] << (i*5 % 24))); return (1103515243 * value + 12345); } #ifndef WORDS_BIGENDIAN # define HASH_LITTLE_ENDIAN 1 # define HASH_BIG_ENDIAN 0 #else # define HASH_LITTLE_ENDIAN 0 # define HASH_BIG_ENDIAN 1 #endif /* ------------------------------------------------------------------------------- lookup3.c, by Bob Jenkins, May 2006, Public Domain. These are functions for producing 32-bit hashes for hash table lookup. hash_word(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() are externally useful functions. Routines to test the hash are included if SELF_TEST is defined. You can use this free for any purpose. It's in the public domain. It has no warranty. You probably want to use hashlittle(). hashlittle() and hashbig() hash byte arrays. hashlittle() is is faster than hashbig() on little-endian machines. Intel and AMD are little-endian machines. On second thought, you probably want hashlittle2(), which is identical to hashlittle() except it returns two 32-bit hashes for the price of one. You could implement hashbig2() if you wanted but I haven't bothered here. If you want to find a hash of, say, exactly 7 integers, do a = i1; b = i2; c = i3; mix(a,b,c); a += i4; b += i5; c += i6; mix(a,b,c); a += i7; final(a,b,c); then use c as the hash value. If you have a variable length array of 4-byte integers to hash, use hash_word(). If you have a byte array (like a character string), use hashlittle(). If you have several byte arrays, or a mix of things, see the comments above hashlittle(). Why is this so big? I read 12 bytes at a time into 3 4-byte integers, then mix those integers. 
This is fast (you can do a lot more thorough mixing with 12*3 instructions on 3 integers than you can with 3 instructions on 1 byte), but shoehorning those bytes into integers efficiently is messy. */ #define hashsize(n) ((uint32_t)1<<(n)) #define hashmask(n) (hashsize(n)-1) #define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k)))) /* ------------------------------------------------------------------------------- mix -- mix 3 32-bit values reversibly. This is reversible, so any information in (a,b,c) before mix() is still in (a,b,c) after mix(). If four pairs of (a,b,c) inputs are run through mix(), or through mix() in reverse, there are at least 32 bits of the output that are sometimes the same for one pair and different for another pair. This was tested for: * pairs that differed by one bit, by two bits, in any combination of top bits of (a,b,c), or in any combination of bottom bits of (a,b,c). * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed the output delta to a Gray code (a^(a>>1)) so a string of 1's (as is commonly produced by subtraction) look like a single 1-bit difference. * the base values were pseudorandom, all zero but one bit set, or all zero plus a counter that starts at zero. Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that satisfy this are 4 6 8 16 19 4 9 15 3 18 27 15 14 9 3 7 17 3 Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing for "differ" defined as + with a one-bit base and a two-bit delta. I used http://burtleburtle.net/bob/hash/avalanche.html to choose the operations, constants, and arrangements of the variables. This does not achieve avalanche. There are input bits of (a,b,c) that fail to affect some output bits of (a,b,c), especially of a. The most thoroughly mixed value is c, but it doesn't really even achieve avalanche in c. This allows some parallelism. Read-after-writes are good at doubling the number of bits affected, so the goal of mixing pulls in the opposite direction as the goal of parallelism. I did what I could. Rotates seem to cost as much as shifts on every machine I could lay my hands on, and rotates are much kinder to the top and bottom bits, so I used rotates. ------------------------------------------------------------------------------- */ #define mix(a,b,c) \ { \ a -= c; a ^= rot(c, 4); c += b; \ b -= a; b ^= rot(a, 6); a += c; \ c -= b; c ^= rot(b, 8); b += a; \ a -= c; a ^= rot(c,16); c += b; \ b -= a; b ^= rot(a,19); a += c; \ c -= b; c ^= rot(b, 4); b += a; \ } /* ------------------------------------------------------------------------------- final -- final mixing of 3 32-bit values (a,b,c) into c Pairs of (a,b,c) values differing in only a few bits will usually produce values of c that look totally different. This was tested for * pairs that differed by one bit, by two bits, in any combination of top bits of (a,b,c), or in any combination of bottom bits of (a,b,c). * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed the output delta to a Gray code (a^(a>>1)) so a string of 1's (as is commonly produced by subtraction) look like a single 1-bit difference. * the base values were pseudorandom, all zero but one bit set, or all zero plus a counter that starts at zero. 
These constants passed: 14 11 25 16 4 14 24 12 14 25 16 4 14 24 and these came close: 4 8 15 26 3 22 24 10 8 15 26 3 22 24 11 8 15 26 3 22 24 ------------------------------------------------------------------------------- */ #define final(a,b,c) \ { \ c ^= b; c -= rot(b,14); \ a ^= c; a -= rot(c,11); \ b ^= a; b -= rot(a,25); \ c ^= b; c -= rot(b,16); \ a ^= c; a -= rot(c,4); \ b ^= a; b -= rot(a,14); \ c ^= b; c -= rot(b,24); \ } /* ------------------------------------------------------------------------------- hashlittle() -- hash a variable-length key into a 32-bit value k : the key (the unaligned variable-length array of bytes) length : the length of the key, counting by bytes val2 : IN: can be any 4-byte value OUT: second 32 bit hash. Returns a 32-bit value. Every bit of the key affects every bit of the return value. Two keys differing by one or two bits will have totally different hash values. Note that the return value is better mixed than val2, so use that first. The best hash table sizes are powers of 2. There is no need to do mod a prime (mod is sooo slow!). If you need less than 32 bits, use a bitmask. For example, if you need only 10 bits, do h = (h & hashmask(10)); In which case, the hash table should have hashsize(10) elements. If you are hashing n strings (uint8_t **)k, do it like this: for (i=0, h=0; i<n; ++i) h = hashlittle(k[i], len[i], h); By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this code any way you wish, private, educational, or commercial. It's free. Use for hash table lookup, or anything where one collision in 2^^32 is acceptable. Do NOT use for cryptographic purposes. ------------------------------------------------------------------------------- */ static uint32_t hashlittle(const void *key, size_t length) { uint32_t a,b,c; /* internal state */ union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */ /* Set up the internal state */ a = b = c = 0xdeadbeef + ((uint32_t)length); u.ptr = key; if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) { const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */ const uint8_t *k8; /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */ while (length > 12) { a += k[0]; b += k[1]; c += k[2]; mix(a,b,c); length -= 12; k += 3; } /*----------------------------- handle the last (probably partial) block */ k8 = (const uint8_t *)k; switch(length) { case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; case 11: c+=((uint32_t)k8[10])<<16; FALL_THROUGH; case 10: c+=((uint32_t)k8[9])<<8; FALL_THROUGH; case 9 : c+=k8[8]; FALL_THROUGH; case 8 : b+=k[1]; a+=k[0]; break; case 7 : b+=((uint32_t)k8[6])<<16; FALL_THROUGH; case 6 : b+=((uint32_t)k8[5])<<8; FALL_THROUGH; case 5 : b+=k8[4]; FALL_THROUGH; case 4 : a+=k[0]; break; case 3 : a+=((uint32_t)k8[2])<<16; FALL_THROUGH; case 2 : a+=((uint32_t)k8[1])<<8; FALL_THROUGH; case 1 : a+=k8[0]; break; case 0 : return c; } } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) { const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */ const uint8_t *k8; /*--------------- all but last block: aligned reads and different mixing */ while (length > 12) { a += k[0] + (((uint32_t)k[1])<<16); b += k[2] + (((uint32_t)k[3])<<16); c += k[4] + (((uint32_t)k[5])<<16); mix(a,b,c); length -= 12; k += 6; } /*----------------------------- handle the last (probably partial) block */ k8 = (const uint8_t *)k; switch(length) { case 12: c+=k[4]+(((uint32_t)k[5])<<16); b+=k[2]+(((uint32_t)k[3])<<16); a+=k[0]+(((uint32_t)k[1])<<16); break; case 11: c+=((uint32_t)k8[10])<<16; FALL_THROUGH; case 10: c+=k[4]; b+=k[2]+(((uint32_t)k[3])<<16); a+=k[0]+(((uint32_t)k[1])<<16); break; case 9 : c+=k8[8]; FALL_THROUGH; case 8 : b+=k[2]+(((uint32_t)k[3])<<16); a+=k[0]+(((uint32_t)k[1])<<16); break; case 7 : b+=((uint32_t)k8[6])<<16; FALL_THROUGH; case 6 : b+=k[2]; a+=k[0]+(((uint32_t)k[1])<<16); break; case 5 : b+=k8[4]; FALL_THROUGH; case 4 : a+=k[0]+(((uint32_t)k[1])<<16); break; case 3 : a+=((uint32_t)k8[2])<<16; FALL_THROUGH; case 2 : a+=k[0]; break; case 1 : a+=k8[0]; break; case 0 : return c; /* zero length requires no mixing */ } } else { /* need to read the key one byte at a time */ const uint8_t *k = (const uint8_t *)key; /*--------------- all but the last block: affect some 32 bits of (a,b,c) */ while (length > 12) { a += k[0]; a += ((uint32_t)k[1])<<8; a += ((uint32_t)k[2])<<16; a += ((uint32_t)k[3])<<24; b += k[4]; b += 
((uint32_t)k[5])<<8; b += ((uint32_t)k[6])<<16; b += ((uint32_t)k[7])<<24; c += k[8]; c += ((uint32_t)k[9])<<8; c += ((uint32_t)k[10])<<16; c += ((uint32_t)k[11])<<24; mix(a,b,c); length -= 12; k += 12; } /*-------------------------------- last block: affect all 32 bits of (c) */ switch(length) { case 12: c+=((uint32_t)k[11])<<24; FALL_THROUGH; case 11: c+=((uint32_t)k[10])<<16; FALL_THROUGH; case 10: c+=((uint32_t)k[9])<<8; FALL_THROUGH; case 9 : c+=k[8]; FALL_THROUGH; case 8 : b+=((uint32_t)k[7])<<24; FALL_THROUGH; case 7 : b+=((uint32_t)k[6])<<16; FALL_THROUGH; case 6 : b+=((uint32_t)k[5])<<8; FALL_THROUGH; case 5 : b+=k[4]; FALL_THROUGH; case 4 : a+=((uint32_t)k[3])<<24; FALL_THROUGH; case 3 : a+=((uint32_t)k[2])<<16; FALL_THROUGH; case 2 : a+=((uint32_t)k[1])<<8; FALL_THROUGH; case 1 : a+=k[0]; break; case 0 : return c; } } final(a,b,c); return c; } _PUBLIC_ unsigned int tdb_jenkins_hash(TDB_DATA *key) { return hashlittle(key->dptr, key->dsize); } tdb-1.4.2/common/io.c0000660000000000000000000004517113527011454014300 0ustar rootroot00000000000000 /* Unix SMB/CIFS implementation. trivial database library Copyright (C) Andrew Tridgell 1999-2005 Copyright (C) Paul `Rusty' Russell 2000 Copyright (C) Jeremy Allison 2000-2003 ** NOTE! The following LGPL license applies to the tdb ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "tdb_private.h" /* * We prepend the mutex area, so fixup offsets. See mutex.c for details. * tdb->hdr_ofs is 0 or header.mutex_size. * * Note: that we only have the 4GB limit of tdb_off_t for * tdb->map_size. The file size on disk can be 4GB + tdb->hdr_ofs! 
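 * * As an illustrative sketch (the 4096 below is a hypothetical mutex-area size, not a value from this file): with tdb->hdr_ofs == 4096, the logical tdb offset 100 becomes file offset 4196, i.e. off_t off = 100; tdb_adjust_offset(tdb, &off); leaves off == 4196, which is what tdb_pread()/tdb_pwrite() hand to the kernel.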
*/ static bool tdb_adjust_offset(struct tdb_context *tdb, off_t *off) { off_t tmp = tdb->hdr_ofs + *off; if ((tmp < tdb->hdr_ofs) || (tmp < *off)) { errno = EIO; return false; } *off = tmp; return true; } static ssize_t tdb_pwrite(struct tdb_context *tdb, const void *buf, size_t count, off_t offset) { ssize_t ret; if (!tdb_adjust_offset(tdb, &offset)) { return -1; } do { ret = pwrite(tdb->fd, buf, count, offset); } while ((ret == -1) && (errno == EINTR)); return ret; } static ssize_t tdb_pread(struct tdb_context *tdb, void *buf, size_t count, off_t offset) { ssize_t ret; if (!tdb_adjust_offset(tdb, &offset)) { return -1; } do { ret = pread(tdb->fd, buf, count, offset); } while ((ret == -1) && (errno == EINTR)); return ret; } static int tdb_ftruncate(struct tdb_context *tdb, off_t length) { ssize_t ret; if (!tdb_adjust_offset(tdb, &length)) { return -1; } do { ret = ftruncate(tdb->fd, length); } while ((ret == -1) && (errno == EINTR)); return ret; } #ifdef HAVE_POSIX_FALLOCATE static int tdb_posix_fallocate(struct tdb_context *tdb, off_t offset, off_t len) { ssize_t ret; if (!tdb_adjust_offset(tdb, &offset)) { return -1; } do { ret = posix_fallocate(tdb->fd, offset, len); } while ((ret == -1) && (errno == EINTR)); return ret; } #endif static int tdb_fstat(struct tdb_context *tdb, struct stat *buf) { int ret; ret = fstat(tdb->fd, buf); if (ret == -1) { return -1; } if (buf->st_size < tdb->hdr_ofs) { errno = EIO; return -1; } buf->st_size -= tdb->hdr_ofs; return ret; } /* check for an out of bounds access - if it is out of bounds then see if the database has been expanded by someone else and expand if necessary */ static int tdb_notrans_oob( struct tdb_context *tdb, tdb_off_t off, tdb_len_t len, int probe) { struct stat st; if (len + off < len) { if (!probe) { /* Ensure ecode is set for log fn. */ tdb->ecode = TDB_ERR_IO; TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_oob off %u len %u wrap\n", off, len)); } return -1; } /* * This duplicates functionality from tdb_oob(). Don't remove: * we still have direct callers of tdb->methods->tdb_oob() * inside transaction.c. */ if (off + len <= tdb->map_size) return 0; if (tdb->flags & TDB_INTERNAL) { if (!probe) { /* Ensure ecode is set for log fn. */ tdb->ecode = TDB_ERR_IO; TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_oob len %u beyond internal malloc size %u\n", (int)(off + len), (int)tdb->map_size)); } return -1; } if (tdb_fstat(tdb, &st) == -1) { tdb->ecode = TDB_ERR_IO; return -1; } /* Beware >4G files! */ if ((tdb_off_t)st.st_size != st.st_size) { /* Ensure ecode is set for log fn. */ tdb->ecode = TDB_ERR_IO; TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_oob len %llu too large!\n", (long long)st.st_size)); return -1; } /* Unmap, update size, remap. We do this unconditionally, to handle * the unusual case where the db is truncated. * * This can happen to a child using tdb_reopen_all(true) on a * TDB_CLEAR_IF_FIRST tdb whose parent crashes: the next * opener will truncate the database. */ if (tdb_munmap(tdb) == -1) { tdb->ecode = TDB_ERR_IO; return -1; } tdb->map_size = st.st_size; if (tdb_mmap(tdb) != 0) { return -1; } if (st.st_size < (size_t)off + len) { if (!probe) { /* Ensure ecode is set for log fn. 
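E.g. (hypothetical numbers) off == 4000 with len == 200 against a 4096-byte file still ends up here after the remap above, since st.st_size < off + len.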
*/ tdb->ecode = TDB_ERR_IO; TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_oob len %u beyond eof at %u\n", (int)(off + len), (int)st.st_size)); } return -1; } return 0; } /* write a lump of data at a specified offset */ static int tdb_write(struct tdb_context *tdb, tdb_off_t off, const void *buf, tdb_len_t len) { if (len == 0) { return 0; } if (tdb->read_only || tdb->traverse_read) { tdb->ecode = TDB_ERR_RDONLY; return -1; } if (tdb_oob(tdb, off, len, 0) != 0) return -1; if (tdb->map_ptr) { memcpy(off + (char *)tdb->map_ptr, buf, len); } else { #ifdef HAVE_INCOHERENT_MMAP tdb->ecode = TDB_ERR_IO; return -1; #else ssize_t written; written = tdb_pwrite(tdb, buf, len, off); if ((written != (ssize_t)len) && (written != -1)) { /* try once more */ tdb->ecode = TDB_ERR_IO; TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_write: wrote only " "%zi of %u bytes at %u, trying once more\n", written, len, off)); written = tdb_pwrite(tdb, (const char *)buf+written, len-written, off+written); } if (written == -1) { /* Ensure ecode is set for log fn. */ tdb->ecode = TDB_ERR_IO; TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_write failed at %u " "len=%u (%s)\n", off, len, strerror(errno))); return -1; } else if (written != (ssize_t)len) { tdb->ecode = TDB_ERR_IO; TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_write: failed to " "write %u bytes at %u in two attempts\n", len, off)); return -1; } #endif } return 0; } /* Endian conversion: we only ever deal with 4 byte quantities */ void *tdb_convert(void *buf, uint32_t size) { uint32_t i, *p = (uint32_t *)buf; for (i = 0; i < size / 4; i++) p[i] = TDB_BYTEREV(p[i]); return buf; } /* read a lump of data at a specified offset, maybe convert */ static int tdb_read(struct tdb_context *tdb, tdb_off_t off, void *buf, tdb_len_t len, int cv) { if (tdb_oob(tdb, off, len, 0) != 0) { return -1; } if (tdb->map_ptr) { memcpy(buf, off + (char *)tdb->map_ptr, len); } else { #ifdef HAVE_INCOHERENT_MMAP tdb->ecode = TDB_ERR_IO; return -1; #else ssize_t ret; ret = tdb_pread(tdb, buf, len, off); if (ret != (ssize_t)len) { /* Ensure ecode is set for log fn. */ tdb->ecode = TDB_ERR_IO; TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_read failed at %u " "len=%u ret=%zi (%s) map_size=%u\n", off, len, ret, strerror(errno), tdb->map_size)); return -1; } #endif } if (cv) { tdb_convert(buf, len); } return 0; } /* do an unlocked scan of the hash table heads to find the next non-zero head. The value will then be confirmed with the lock held */ static void tdb_next_hash_chain(struct tdb_context *tdb, uint32_t *chain) { uint32_t h = *chain; if (tdb->map_ptr) { for (;h < tdb->hash_size;h++) { if (0 != *(uint32_t *)(TDB_HASH_TOP(h) + (unsigned char *)tdb->map_ptr)) { break; } } } else { uint32_t off=0; for (;h < tdb->hash_size;h++) { if (tdb_ofs_read(tdb, TDB_HASH_TOP(h), &off) != 0 || off != 0) { break; } } } (*chain) = h; } int tdb_munmap(struct tdb_context *tdb) { if (tdb->flags & TDB_INTERNAL) return 0; #ifdef HAVE_MMAP if (tdb->map_ptr) { int ret; ret = munmap(tdb->map_ptr, tdb->map_size); if (ret != 0) return ret; } #endif tdb->map_ptr = NULL; return 0; } /* If mmap isn't coherent, *everyone* must always mmap. */ static bool should_mmap(const struct tdb_context *tdb) { #ifdef HAVE_INCOHERENT_MMAP return true; #else return !(tdb->flags & TDB_NOMMAP); #endif } int tdb_mmap(struct tdb_context *tdb) { if (tdb->flags & TDB_INTERNAL) return 0; #ifdef HAVE_MMAP if (should_mmap(tdb)) { tdb->map_ptr = mmap(NULL, tdb->map_size, PROT_READ|(tdb->read_only? 0:PROT_WRITE), MAP_SHARED|MAP_FILE, tdb->fd, tdb->hdr_ofs); /* * NB. 
When mmap fails it returns MAP_FAILED *NOT* NULL !!!! */ if (tdb->map_ptr == MAP_FAILED) { tdb->map_ptr = NULL; TDB_LOG((tdb, TDB_DEBUG_WARNING, "tdb_mmap failed for size %u (%s)\n", tdb->map_size, strerror(errno))); #ifdef HAVE_INCOHERENT_MMAP tdb->ecode = TDB_ERR_IO; return -1; #endif } } else { tdb->map_ptr = NULL; } #else tdb->map_ptr = NULL; #endif return 0; } /* expand a file. we prefer to use ftruncate, as that is what posix says to use for mmap expansion */ static int tdb_expand_file(struct tdb_context *tdb, tdb_off_t size, tdb_off_t addition) { char buf[8192]; tdb_off_t new_size; int ret; if (tdb->read_only || tdb->traverse_read) { tdb->ecode = TDB_ERR_RDONLY; return -1; } if (!tdb_add_off_t(size, addition, &new_size)) { tdb->ecode = TDB_ERR_OOM; TDB_LOG((tdb, TDB_DEBUG_FATAL, "expand_file write " "overflow detected current size[%u] addition[%u]!\n", (unsigned)size, (unsigned)addition)); errno = ENOSPC; return -1; } #ifdef HAVE_POSIX_FALLOCATE ret = tdb_posix_fallocate(tdb, size, addition); if (ret == 0) { return 0; } if (ret == ENOSPC) { /* * The Linux glibc (at least as of 2.24) fallback if * the file system does not support fallocate does not * reset the file size back to where it was. Also, to * me it is unclear from the posix spec of * posix_fallocate whether this is allowed or * not. Better be safe than sorry and "goto fail" but * "return -1" here, leaving the EOF pointer too * large. */ goto fail; } /* * Retry the "old" way. Possibly unnecessary, but looking at * our configure script there seem to be weird failure modes * for posix_fallocate. See commit 3264a98ff16de, which * probably refers to * https://sourceware.org/bugzilla/show_bug.cgi?id=1083. */ #endif ret = tdb_ftruncate(tdb, new_size); if (ret == -1) { char b = 0; ssize_t written = tdb_pwrite(tdb, &b, 1, new_size - 1); if (written == 0) { /* try once more, potentially revealing errno */ written = tdb_pwrite(tdb, &b, 1, new_size - 1); } if (written == 0) { /* again - give up, guessing errno */ errno = ENOSPC; } if (written != 1) { tdb->ecode = TDB_ERR_OOM; TDB_LOG((tdb, TDB_DEBUG_FATAL, "expand_file to %u failed (%s)\n", (unsigned)new_size, strerror(errno))); return -1; } } /* now fill the file with something. This ensures that the file isn't sparse, which would be very bad if we ran out of disk. This must be done with write, not via mmap */ memset(buf, TDB_PAD_BYTE, sizeof(buf)); while (addition) { size_t n = addition>sizeof(buf)?sizeof(buf):addition; ssize_t written = tdb_pwrite(tdb, buf, n, size); if (written == 0) { /* prevent infinite loops: try _once_ more */ written = tdb_pwrite(tdb, buf, n, size); } if (written == 0) { /* give up, trying to provide a useful errno */ tdb->ecode = TDB_ERR_OOM; TDB_LOG((tdb, TDB_DEBUG_FATAL, "expand_file write " "returned 0 twice: giving up!\n")); errno = ENOSPC; goto fail; } if (written == -1) { tdb->ecode = TDB_ERR_OOM; TDB_LOG((tdb, TDB_DEBUG_FATAL, "expand_file write of " "%u bytes failed (%s)\n", (int)n, strerror(errno))); goto fail; } if (written != n) { TDB_LOG((tdb, TDB_DEBUG_WARNING, "expand_file: wrote " "only %zu of %zi bytes - retrying\n", written, n)); } addition -= written; size += written; } return 0; fail: { int err = errno; /* * We're holding the freelist lock or are inside a * transaction. Cutting the file is safe, the space we * tried to allocate can't have been used anywhere in * the meantime. 
*/ ret = tdb_ftruncate(tdb, size); if (ret == -1) { TDB_LOG((tdb, TDB_DEBUG_WARNING, "expand_file: " "retruncate to %ju failed\n", (uintmax_t)size)); } errno = err; } return -1; } /* You need 'size', this tells you how much you should expand by. */ tdb_off_t tdb_expand_adjust(tdb_off_t map_size, tdb_off_t size, int page_size) { tdb_off_t new_size, top_size, increment; tdb_off_t max_size = UINT32_MAX - map_size; if (size > max_size) { /* * We can't round up anymore, just give back * what we're asked for. * * The caller has to take care of the ENOSPC handling. */ return size; } /* limit size in order to avoid using up huge amounts of memory for * in memory tdbs if an oddball huge record creeps in */ if (size > 100 * 1024) { increment = size * 2; } else { increment = size * 100; } if (increment < size) { goto overflow; } if (!tdb_add_off_t(map_size, increment, &top_size)) { goto overflow; } /* always make room for at least top_size more records, and at least 25% more space. if the DB is smaller than 100MiB, otherwise grow it by 10% only. */ if (map_size > 100 * 1024 * 1024) { new_size = map_size * 1.10; } else { new_size = map_size * 1.25; } if (new_size < map_size) { goto overflow; } /* Round the database up to a multiple of the page size */ new_size = MAX(top_size, new_size); if (new_size + page_size < new_size) { /* There's a "+" in TDB_ALIGN that might overflow... */ goto overflow; } return TDB_ALIGN(new_size, page_size) - map_size; overflow: /* * Somewhere in between we went over 4GB. Make one big jump to * exactly 4GB database size. */ return max_size; } /* expand the database at least size bytes by expanding the underlying file and doing the mmap again if necessary */ int tdb_expand(struct tdb_context *tdb, tdb_off_t size) { struct tdb_record rec; tdb_off_t offset; tdb_off_t new_size; if (tdb_lock(tdb, -1, F_WRLCK) == -1) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "lock failed in tdb_expand\n")); return -1; } /* must know about any previous expansions by another process */ tdb_oob(tdb, tdb->map_size, 1, 1); /* * Note: that we don't care about tdb->hdr_ofs != 0 here * * The 4GB limitation is just related to tdb->map_size * and the offset calculation in the records. * * The file on disk can be up to 4GB + tdb->hdr_ofs */ size = tdb_expand_adjust(tdb->map_size, size, tdb->page_size); if (!tdb_add_off_t(tdb->map_size, size, &new_size)) { tdb->ecode = TDB_ERR_OOM; TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_expand " "overflow detected current map_size[%u] size[%u]!\n", (unsigned)tdb->map_size, (unsigned)size)); goto fail; } /* form a new freelist record */ offset = tdb->map_size; memset(&rec,'\0',sizeof(rec)); rec.rec_len = size - sizeof(rec); if (tdb->flags & TDB_INTERNAL) { char *new_map_ptr; new_map_ptr = (char *)realloc(tdb->map_ptr, new_size); if (!new_map_ptr) { tdb->ecode = TDB_ERR_OOM; goto fail; } tdb->map_ptr = new_map_ptr; tdb->map_size = new_size; } else { int ret; /* * expand the file itself */ ret = tdb->methods->tdb_expand_file(tdb, tdb->map_size, size); if (ret != 0) { goto fail; } /* Explicitly remap: if we're in a transaction, this won't * happen automatically! 
*/ tdb_munmap(tdb); tdb->map_size = new_size; if (tdb_mmap(tdb) != 0) { goto fail; } } /* link it into the free list */ if (tdb_free(tdb, offset, &rec) == -1) goto fail; tdb_unlock(tdb, -1, F_WRLCK); return 0; fail: tdb_unlock(tdb, -1, F_WRLCK); return -1; } int _tdb_oob(struct tdb_context *tdb, tdb_off_t off, tdb_len_t len, int probe) { int ret = tdb->methods->tdb_oob(tdb, off, len, probe); return ret; } /* read/write a tdb_off_t */ int tdb_ofs_read(struct tdb_context *tdb, tdb_off_t offset, tdb_off_t *d) { return tdb->methods->tdb_read(tdb, offset, (char*)d, sizeof(*d), DOCONV()); } int tdb_ofs_write(struct tdb_context *tdb, tdb_off_t offset, tdb_off_t *d) { tdb_off_t off = *d; return tdb->methods->tdb_write(tdb, offset, CONVERT(off), sizeof(*d)); } /* read a lump of data, allocating the space for it */ unsigned char *tdb_alloc_read(struct tdb_context *tdb, tdb_off_t offset, tdb_len_t len) { unsigned char *buf; /* some systems don't like zero length malloc */ if (!(buf = (unsigned char *)malloc(len ? len : 1))) { /* Ensure ecode is set for log fn. */ tdb->ecode = TDB_ERR_OOM; TDB_LOG((tdb, TDB_DEBUG_ERROR,"tdb_alloc_read malloc failed len=%u (%s)\n", len, strerror(errno))); return NULL; } if (tdb->methods->tdb_read(tdb, offset, buf, len, 0) == -1) { SAFE_FREE(buf); return NULL; } return buf; } /* Give a piece of tdb data to a parser */ int tdb_parse_data(struct tdb_context *tdb, TDB_DATA key, tdb_off_t offset, tdb_len_t len, int (*parser)(TDB_DATA key, TDB_DATA data, void *private_data), void *private_data) { TDB_DATA data; int result; data.dsize = len; if ((tdb->transaction == NULL) && (tdb->map_ptr != NULL)) { /* * Optimize by avoiding the malloc/memcpy/free, point the * parser directly at the mmap area. */ if (tdb_oob(tdb, offset, len, 0) != 0) { return -1; } data.dptr = offset + (unsigned char *)tdb->map_ptr; return parser(key, data, private_data); } if (!(data.dptr = tdb_alloc_read(tdb, offset, len))) { return -1; } result = parser(key, data, private_data); free(data.dptr); return result; } /* read/write a record */ int tdb_rec_read(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec) { int ret; tdb_len_t overall_len; if (tdb->methods->tdb_read(tdb, offset, rec, sizeof(*rec),DOCONV()) == -1) return -1; if (TDB_BAD_MAGIC(rec)) { /* Ensure ecode is set for log fn. */ tdb->ecode = TDB_ERR_CORRUPT; TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_rec_read bad magic 0x%x at offset=%u\n", rec->magic, offset)); return -1; } overall_len = rec->key_len + rec->data_len; if (overall_len < rec->data_len) { /* overflow */ return -1; } if (overall_len > rec->rec_len) { /* invalid record */ return -1; } ret = tdb_oob(tdb, offset, rec->key_len, 1); if (ret == -1) { return -1; } ret = tdb_oob(tdb, offset, rec->data_len, 1); if (ret == -1) { return -1; } ret = tdb_oob(tdb, offset, rec->rec_len, 1); if (ret == -1) { return -1; } return tdb_oob(tdb, rec->next, sizeof(*rec), 0); } int tdb_rec_write(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec) { struct tdb_record r = *rec; return tdb->methods->tdb_write(tdb, offset, CONVERT(r), sizeof(r)); } static const struct tdb_methods io_methods = { tdb_read, tdb_write, tdb_next_hash_chain, tdb_notrans_oob, tdb_expand_file, }; /* initialise the default methods table */ void tdb_io_init(struct tdb_context *tdb) { tdb->methods = &io_methods; } tdb-1.4.2/common/lock.c0000660000000000000000000006173313527011454014623 0ustar rootroot00000000000000 /* Unix SMB/CIFS implementation. 
trivial database library Copyright (C) Andrew Tridgell 1999-2005 Copyright (C) Paul `Rusty' Russell 2000 Copyright (C) Jeremy Allison 2000-2003 ** NOTE! The following LGPL license applies to the tdb ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "tdb_private.h" _PUBLIC_ void tdb_setalarm_sigptr(struct tdb_context *tdb, volatile sig_atomic_t *ptr) { tdb->interrupt_sig_ptr = ptr; } static int fcntl_lock(struct tdb_context *tdb, int rw, off_t off, off_t len, bool waitflag) { struct flock fl; int cmd; #ifdef USE_TDB_MUTEX_LOCKING { int ret; if (tdb_mutex_lock(tdb, rw, off, len, waitflag, &ret)) { return ret; } } #endif fl.l_type = rw; fl.l_whence = SEEK_SET; fl.l_start = off; fl.l_len = len; fl.l_pid = 0; cmd = waitflag ? F_SETLKW : F_SETLK; return fcntl(tdb->fd, cmd, &fl); } static int fcntl_unlock(struct tdb_context *tdb, int rw, off_t off, off_t len) { struct flock fl; #if 0 /* Check they matched up locks and unlocks correctly. */ char line[80]; FILE *locks; bool found = false; locks = fopen("/proc/locks", "r"); while (fgets(line, 80, locks)) { char *p; int type, start, l; /* eg. 1: FLOCK ADVISORY WRITE 2440 08:01:2180826 0 EOF */ p = strchr(line, ':') + 1; if (strncmp(p, " POSIX ADVISORY ", strlen(" POSIX ADVISORY "))) continue; p += strlen(" FLOCK ADVISORY "); if (strncmp(p, "READ ", strlen("READ ")) == 0) type = F_RDLCK; else if (strncmp(p, "WRITE ", strlen("WRITE ")) == 0) type = F_WRLCK; else abort(); p += 6; if (atoi(p) != getpid()) continue; p = strchr(strchr(p, ' ') + 1, ' ') + 1; start = atoi(p); p = strchr(p, ' ') + 1; if (strncmp(p, "EOF", 3) == 0) l = 0; else l = atoi(p) - start + 1; if (off == start) { if (len != l) { fprintf(stderr, "Len %u should be %u: %s", (int)len, l, line); abort(); } if (type != rw) { fprintf(stderr, "Type %s wrong: %s", rw == F_RDLCK ? "READ" : "WRITE", line); abort(); } found = true; break; } } if (!found) { fprintf(stderr, "Unlock on %u@%u not found!\n", (int)off, (int)len); abort(); } fclose(locks); #endif #ifdef USE_TDB_MUTEX_LOCKING { int ret; if (tdb_mutex_unlock(tdb, rw, off, len, &ret)) { return ret; } } #endif fl.l_type = F_UNLCK; fl.l_whence = SEEK_SET; fl.l_start = off; fl.l_len = len; fl.l_pid = 0; return fcntl(tdb->fd, F_SETLKW, &fl); } /* * Calculate the lock offset for a list * * list -1 is the freelist, otherwise a hash chain. * * Note that we consistently (but without real reason) lock hash chains at an * offset that is 4 bytes below the real offset of the corresponding list head * in the db. * * This is the memory layout of the hashchain array: * * FREELIST_TOP + 0 = freelist * FREELIST_TOP + 4 = hashtable list 0 * FREELIST_TOP + 8 = hashtable list 1 * ... * * Otoh lock_offset computes: * * freelist = FREELIST_TOP - 4 * list 0 = FREELIST_TOP + 0 * list 1 = FREELIST_TOP + 4 * ... 
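 * * Worked example against the table above: lock_offset(-1) == FREELIST_TOP - 4 for the freelist and lock_offset(0) == FREELIST_TOP + 0 for hash list 0, each guarding the 1-byte fcntl range taken by tdb_brlock().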
* * Unfortunately we can't change this calculation in order to align the locking * offset with the memory layout, as that would make the locking incompatible * between different tdb versions. */ static tdb_off_t lock_offset(int list) { return FREELIST_TOP + 4*list; } /* a byte range locking function - return 0 on success this functions locks/unlocks "len" byte at the specified offset. On error, errno is also set so that errors are passed back properly through tdb_open(). note that a len of zero means lock to end of file */ int tdb_brlock(struct tdb_context *tdb, int rw_type, tdb_off_t offset, size_t len, enum tdb_lock_flags flags) { int ret; if (tdb->flags & TDB_NOLOCK) { return 0; } if (flags & TDB_LOCK_MARK_ONLY) { return 0; } if ((rw_type == F_WRLCK) && (tdb->read_only || tdb->traverse_read)) { tdb->ecode = TDB_ERR_RDONLY; return -1; } do { ret = fcntl_lock(tdb, rw_type, offset, len, flags & TDB_LOCK_WAIT); /* Check for a sigalarm break. */ if (ret == -1 && errno == EINTR && tdb->interrupt_sig_ptr && *tdb->interrupt_sig_ptr) { break; } } while (ret == -1 && errno == EINTR); if (ret == -1) { tdb->ecode = TDB_ERR_LOCK; /* Generic lock error. errno set by fcntl. * EAGAIN is an expected return from non-blocking * locks. */ if (!(flags & TDB_LOCK_PROBE) && errno != EAGAIN) { TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_brlock failed (fd=%d) at offset %u rw_type=%d flags=%d len=%zu\n", tdb->fd, offset, rw_type, flags, len)); } return -1; } return 0; } int tdb_brunlock(struct tdb_context *tdb, int rw_type, tdb_off_t offset, size_t len) { int ret; if (tdb->flags & TDB_NOLOCK) { return 0; } do { ret = fcntl_unlock(tdb, rw_type, offset, len); } while (ret == -1 && errno == EINTR); if (ret == -1) { TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_brunlock failed (fd=%d) at offset %u rw_type=%u len=%zu\n", tdb->fd, offset, rw_type, len)); } return ret; } /* * Do a tdb_brlock in a loop. Some OSes (such as solaris) have too * conservative deadlock detection and claim a deadlock when progress can be * made. For those OSes we may loop for a while. */ static int tdb_brlock_retry(struct tdb_context *tdb, int rw_type, tdb_off_t offset, size_t len, enum tdb_lock_flags flags) { int count = 1000; while (count--) { struct timeval tv; int ret; ret = tdb_brlock(tdb, rw_type, offset, len, flags); if (ret == 0) { return 0; } if (errno != EDEADLK) { break; } /* sleep for as short a time as we can - more portable than usleep() */ tv.tv_sec = 0; tv.tv_usec = 1; select(0, NULL, NULL, NULL, &tv); } return -1; } /* upgrade a read lock to a write lock. 
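 * * It only succeeds when the allrecord lock is held exactly once and was taken upgradable, e.g. (illustrative call, not from this file) tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, true), as the transaction code does; both preconditions are checked below.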
*/ int tdb_allrecord_upgrade(struct tdb_context *tdb) { int ret; if (tdb->allrecord_lock.count != 1) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_allrecord_upgrade failed: count %u too high\n", tdb->allrecord_lock.count)); tdb->ecode = TDB_ERR_LOCK; return -1; } if (tdb->allrecord_lock.off != 1) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_allrecord_upgrade failed: already upgraded?\n")); tdb->ecode = TDB_ERR_LOCK; return -1; } if (tdb_have_mutexes(tdb)) { ret = tdb_mutex_allrecord_upgrade(tdb); if (ret == -1) { goto fail; } ret = tdb_brlock_retry(tdb, F_WRLCK, lock_offset(tdb->hash_size), 0, TDB_LOCK_WAIT|TDB_LOCK_PROBE); if (ret == -1) { tdb_mutex_allrecord_downgrade(tdb); } } else { ret = tdb_brlock_retry(tdb, F_WRLCK, FREELIST_TOP, 0, TDB_LOCK_WAIT|TDB_LOCK_PROBE); } if (ret == 0) { tdb->allrecord_lock.ltype = F_WRLCK; tdb->allrecord_lock.off = 0; return 0; } fail: TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_allrecord_upgrade failed\n")); return -1; } static struct tdb_lock_type *find_nestlock(struct tdb_context *tdb, tdb_off_t offset) { int i; for (i=0; i<tdb->num_lockrecs; i++) { if (tdb->lockrecs[i].off == offset) { return &tdb->lockrecs[i]; } } return NULL; } /* lock an offset in the database. */ int tdb_nest_lock(struct tdb_context *tdb, uint32_t offset, int ltype, enum tdb_lock_flags flags) { struct tdb_lock_type *new_lck; if (offset >= lock_offset(tdb->hash_size)) { tdb->ecode = TDB_ERR_LOCK; TDB_LOG((tdb, TDB_DEBUG_ERROR,"tdb_lock: invalid offset %u for ltype=%d\n", offset, ltype)); return -1; } if (tdb->flags & TDB_NOLOCK) return 0; new_lck = find_nestlock(tdb, offset); if (new_lck) { if ((new_lck->ltype == F_RDLCK) && (ltype == F_WRLCK)) { if (!tdb_have_mutexes(tdb)) { int ret; /* * Upgrade the underlying fcntl * lock. Mutexes don't do readlocks, * so this only applies to fcntl * locking. */ ret = tdb_brlock(tdb, ltype, offset, 1, flags); if (ret != 0) { return ret; } } new_lck->ltype = F_WRLCK; } /* * Just increment the in-memory struct, posix locks * don't stack. */ new_lck->count++; return 0; } if (tdb->num_lockrecs == tdb->lockrecs_array_length) { new_lck = (struct tdb_lock_type *)realloc( tdb->lockrecs, sizeof(*tdb->lockrecs) * (tdb->num_lockrecs+1)); if (new_lck == NULL) { errno = ENOMEM; return -1; } tdb->lockrecs_array_length = tdb->num_lockrecs+1; tdb->lockrecs = new_lck; } /* Since fcntl locks don't nest, we do a lock for the first one, and simply bump the count for future ones */ if (tdb_brlock(tdb, ltype, offset, 1, flags)) { return -1; } new_lck = &tdb->lockrecs[tdb->num_lockrecs]; new_lck->off = offset; new_lck->count = 1; new_lck->ltype = ltype; tdb->num_lockrecs++; return 0; } static int tdb_lock_and_recover(struct tdb_context *tdb) { int ret; /* We need to match locking order in transaction commit. */ if (tdb_brlock(tdb, F_WRLCK, FREELIST_TOP, 0, TDB_LOCK_WAIT)) { return -1; } if (tdb_brlock(tdb, F_WRLCK, OPEN_LOCK, 1, TDB_LOCK_WAIT)) { tdb_brunlock(tdb, F_WRLCK, FREELIST_TOP, 0); return -1; } ret = tdb_transaction_recover(tdb); tdb_brunlock(tdb, F_WRLCK, OPEN_LOCK, 1); tdb_brunlock(tdb, F_WRLCK, FREELIST_TOP, 0); return ret; } static bool have_data_locks(const struct tdb_context *tdb) { int i; for (i = 0; i < tdb->num_lockrecs; i++) { if (tdb->lockrecs[i].off >= lock_offset(-1)) return true; } return false; } /* * An allrecord lock allows us to avoid per chain locks. Check if the allrecord * lock is strong enough.
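 * * In table form: a requested F_RDLCK is covered by an allrecord lock of either type; a requested F_WRLCK is covered only when the allrecord lock itself is F_WRLCK, since a read lock cannot be upgraded here.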
*/ static int tdb_lock_covered_by_allrecord_lock(struct tdb_context *tdb, int ltype) { if (ltype == F_RDLCK) { /* * The allrecord_lock is equal (F_RDLCK) or stronger * (F_WRLCK). Pass. */ return 0; } if (tdb->allrecord_lock.ltype == F_RDLCK) { /* * We ask for ltype==F_WRLCK, but the allrecord_lock * is too weak. We can't upgrade here, so fail. */ tdb->ecode = TDB_ERR_LOCK; return -1; } /* * Asking for F_WRLCK, allrecord is F_WRLCK as well. Pass. */ return 0; } static int tdb_lock_list(struct tdb_context *tdb, int list, int ltype, enum tdb_lock_flags waitflag) { int ret; bool check = false; if (tdb->allrecord_lock.count) { return tdb_lock_covered_by_allrecord_lock(tdb, ltype); } /* * Check for recoveries: Someone might have kill -9'ed a process * during a commit. */ check = !have_data_locks(tdb); ret = tdb_nest_lock(tdb, lock_offset(list), ltype, waitflag); if (ret == 0 && check && tdb_needs_recovery(tdb)) { tdb_nest_unlock(tdb, lock_offset(list), ltype, false); if (tdb_lock_and_recover(tdb) == -1) { return -1; } return tdb_lock_list(tdb, list, ltype, waitflag); } return ret; } /* lock a list in the database. list -1 is the alloc list */ int tdb_lock(struct tdb_context *tdb, int list, int ltype) { int ret; ret = tdb_lock_list(tdb, list, ltype, TDB_LOCK_WAIT); if (ret) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_lock failed on list %d " "ltype=%d (%s)\n", list, ltype, strerror(errno))); } return ret; } /* lock a list in the database. list -1 is the alloc list. non-blocking lock */ _PUBLIC_ int tdb_lock_nonblock(struct tdb_context *tdb, int list, int ltype) { return tdb_lock_list(tdb, list, ltype, TDB_LOCK_NOWAIT); } int tdb_nest_unlock(struct tdb_context *tdb, uint32_t offset, int ltype, bool mark_lock) { int ret = -1; struct tdb_lock_type *lck; if (tdb->flags & TDB_NOLOCK) return 0; /* Sanity checks */ if (offset >= lock_offset(tdb->hash_size)) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: offset %u invalid (%d)\n", offset, tdb->hash_size)); return ret; } lck = find_nestlock(tdb, offset); if ((lck == NULL) || (lck->count == 0)) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: count is 0\n")); return -1; } if (lck->count > 1) { lck->count--; return 0; } /* * This lock has count==1 left, so we need to unlock it in the * kernel. We don't bother with decrementing the in-memory array * element, we're about to overwrite it with the last array element * anyway. */ if (mark_lock) { ret = 0; } else { ret = tdb_brunlock(tdb, ltype, offset, 1); } /* * Shrink the array by overwriting the element just unlocked with the * last array element. */ *lck = tdb->lockrecs[--tdb->num_lockrecs]; /* * We don't bother with realloc when the array shrinks, but if we have * a completely idle tdb we should get rid of the locked array. */ if (ret) TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: An error occurred unlocking!\n")); return ret; } _PUBLIC_ int tdb_unlock(struct tdb_context *tdb, int list, int ltype) { /* a global lock allows us to avoid per chain locks */ if (tdb->allrecord_lock.count) { return tdb_lock_covered_by_allrecord_lock(tdb, ltype); } return tdb_nest_unlock(tdb, lock_offset(list), ltype, false); } /* get the transaction lock */ int tdb_transaction_lock(struct tdb_context *tdb, int ltype, enum tdb_lock_flags lockflags) { return tdb_nest_lock(tdb, TRANSACTION_LOCK, ltype, lockflags); } /* release the transaction lock */ int tdb_transaction_unlock(struct tdb_context *tdb, int ltype) { return tdb_nest_unlock(tdb, TRANSACTION_LOCK, ltype, false); } /* Returns 0 if all done, -1 if error, 1 if ok. 
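 * ('All done' means the allrecord lock was already held and only its count was bumped; 1 tells the caller it must still take the lock itself.)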
*/ static int tdb_allrecord_check(struct tdb_context *tdb, int ltype, enum tdb_lock_flags flags, bool upgradable) { /* There are no locks on read-only dbs */ if (tdb->read_only || tdb->traverse_read) { tdb->ecode = TDB_ERR_LOCK; return -1; } if (tdb->allrecord_lock.count && tdb->allrecord_lock.ltype == (uint32_t)ltype) { tdb->allrecord_lock.count++; return 0; } if (tdb->allrecord_lock.count) { /* a global lock of a different type exists */ tdb->ecode = TDB_ERR_LOCK; return -1; } if (tdb_have_extra_locks(tdb)) { /* can't combine global and chain locks */ tdb->ecode = TDB_ERR_LOCK; return -1; } if (upgradable && ltype != F_RDLCK) { /* tdb error: you can't upgrade a write lock! */ tdb->ecode = TDB_ERR_LOCK; return -1; } return 1; } /* We only need to lock individual bytes, but Linux merges consecutive locks * so we lock in contiguous ranges. */ static int tdb_chainlock_gradual(struct tdb_context *tdb, int ltype, enum tdb_lock_flags flags, size_t off, size_t len) { int ret; enum tdb_lock_flags nb_flags = (flags & ~TDB_LOCK_WAIT); if (len <= 4) { /* Single record. Just do blocking lock. */ return tdb_brlock(tdb, ltype, off, len, flags); } /* First we try non-blocking. */ ret = tdb_brlock(tdb, ltype, off, len, nb_flags); if (ret == 0) { return 0; } /* Try locking first half, then second. */ ret = tdb_chainlock_gradual(tdb, ltype, flags, off, len / 2); if (ret == -1) return -1; ret = tdb_chainlock_gradual(tdb, ltype, flags, off + len / 2, len - len / 2); if (ret == -1) { tdb_brunlock(tdb, ltype, off, len / 2); return -1; } return 0; } /* lock/unlock entire database. It can only be upgradable if you have some * other way of guaranteeing exclusivity (ie. transaction write lock). * We do the locking gradually to avoid being starved by smaller locks. */ int tdb_allrecord_lock(struct tdb_context *tdb, int ltype, enum tdb_lock_flags flags, bool upgradable) { int ret; switch (tdb_allrecord_check(tdb, ltype, flags, upgradable)) { case -1: return -1; case 0: return 0; } /* We cover two kinds of locks: * 1) Normal chain locks. Taken for almost all operations. * 2) Individual records locks. Taken after normal or free * chain locks. * * It is (1) which cause the starvation problem, so we're only * gradual for that. */ if (tdb_have_mutexes(tdb)) { ret = tdb_mutex_allrecord_lock(tdb, ltype, flags); } else { ret = tdb_chainlock_gradual(tdb, ltype, flags, FREELIST_TOP, tdb->hash_size * 4); } if (ret == -1) { return -1; } /* Grab individual record locks. */ if (tdb_brlock(tdb, ltype, lock_offset(tdb->hash_size), 0, flags) == -1) { if (tdb_have_mutexes(tdb)) { tdb_mutex_allrecord_unlock(tdb); } else { tdb_brunlock(tdb, ltype, FREELIST_TOP, tdb->hash_size * 4); } return -1; } tdb->allrecord_lock.count = 1; /* If it's upgradable, it's actually exclusive so we can treat * it as a write lock. */ tdb->allrecord_lock.ltype = upgradable ? 
F_WRLCK : ltype; tdb->allrecord_lock.off = upgradable; if (tdb_needs_recovery(tdb)) { bool mark = flags & TDB_LOCK_MARK_ONLY; tdb_allrecord_unlock(tdb, ltype, mark); if (mark) { tdb->ecode = TDB_ERR_LOCK; TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_lockall_mark cannot do recovery\n")); return -1; } if (tdb_lock_and_recover(tdb) == -1) { return -1; } return tdb_allrecord_lock(tdb, ltype, flags, upgradable); } return 0; } /* unlock entire db */ int tdb_allrecord_unlock(struct tdb_context *tdb, int ltype, bool mark_lock) { /* There are no locks on read-only dbs */ if (tdb->read_only || tdb->traverse_read) { tdb->ecode = TDB_ERR_LOCK; return -1; } if (tdb->allrecord_lock.count == 0) { tdb->ecode = TDB_ERR_LOCK; return -1; } /* Upgradable locks are marked as write locks. */ if (tdb->allrecord_lock.ltype != (uint32_t)ltype && (!tdb->allrecord_lock.off || ltype != F_RDLCK)) { tdb->ecode = TDB_ERR_LOCK; return -1; } if (tdb->allrecord_lock.count > 1) { tdb->allrecord_lock.count--; return 0; } if (!mark_lock) { int ret; if (tdb_have_mutexes(tdb)) { ret = tdb_mutex_allrecord_unlock(tdb); if (ret == 0) { ret = tdb_brunlock(tdb, ltype, lock_offset(tdb->hash_size), 0); } } else { ret = tdb_brunlock(tdb, ltype, FREELIST_TOP, 0); } if (ret != 0) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlockall failed " "(%s)\n", strerror(errno))); return -1; } } tdb->allrecord_lock.count = 0; tdb->allrecord_lock.ltype = 0; return 0; } /* lock entire database with write lock */ _PUBLIC_ int tdb_lockall(struct tdb_context *tdb) { tdb_trace(tdb, "tdb_lockall"); return tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false); } /* lock entire database with write lock - mark only */ _PUBLIC_ int tdb_lockall_mark(struct tdb_context *tdb) { tdb_trace(tdb, "tdb_lockall_mark"); return tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_MARK_ONLY, false); } /* unlock entire database with write lock - unmark only */ _PUBLIC_ int tdb_lockall_unmark(struct tdb_context *tdb) { tdb_trace(tdb, "tdb_lockall_unmark"); return tdb_allrecord_unlock(tdb, F_WRLCK, true); } /* lock entire database with write lock - nonblocking variant */ _PUBLIC_ int tdb_lockall_nonblock(struct tdb_context *tdb) { int ret = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_NOWAIT, false); tdb_trace_ret(tdb, "tdb_lockall_nonblock", ret); return ret; } /* unlock entire database with write lock */ _PUBLIC_ int tdb_unlockall(struct tdb_context *tdb) { tdb_trace(tdb, "tdb_unlockall"); return tdb_allrecord_unlock(tdb, F_WRLCK, false); } /* lock entire database with read lock */ _PUBLIC_ int tdb_lockall_read(struct tdb_context *tdb) { tdb_trace(tdb, "tdb_lockall_read"); return tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false); } /* lock entire database with read lock - nonblocking variant */ _PUBLIC_ int tdb_lockall_read_nonblock(struct tdb_context *tdb) { int ret = tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_NOWAIT, false); tdb_trace_ret(tdb, "tdb_lockall_read_nonblock", ret); return ret; } /* unlock entire database with read lock */ _PUBLIC_ int tdb_unlockall_read(struct tdb_context *tdb) { tdb_trace(tdb, "tdb_unlockall_read"); return tdb_allrecord_unlock(tdb, F_RDLCK, false); } /* lock/unlock one hash chain. This is meant to be used to reduce contention - it cannot guarantee how many records will be locked */ _PUBLIC_ int tdb_chainlock(struct tdb_context *tdb, TDB_DATA key) { int ret = tdb_lock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK); tdb_trace_1rec(tdb, "tdb_chainlock", key); return ret; } /* lock/unlock one hash chain, non-blocking.
This is meant to be used to reduce contention - it cannot guarantee how many records will be locked */ _PUBLIC_ int tdb_chainlock_nonblock(struct tdb_context *tdb, TDB_DATA key) { int ret = tdb_lock_nonblock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK); tdb_trace_1rec_ret(tdb, "tdb_chainlock_nonblock", key, ret); return ret; } /* mark a chain as locked without actually locking it. Warning! use with great caution! */ _PUBLIC_ int tdb_chainlock_mark(struct tdb_context *tdb, TDB_DATA key) { int ret = tdb_nest_lock(tdb, lock_offset(BUCKET(tdb->hash_fn(&key))), F_WRLCK, TDB_LOCK_MARK_ONLY); tdb_trace_1rec(tdb, "tdb_chainlock_mark", key); return ret; } /* unmark a chain as locked without actually locking it. Warning! use with great caution! */ _PUBLIC_ int tdb_chainlock_unmark(struct tdb_context *tdb, TDB_DATA key) { tdb_trace_1rec(tdb, "tdb_chainlock_unmark", key); return tdb_nest_unlock(tdb, lock_offset(BUCKET(tdb->hash_fn(&key))), F_WRLCK, true); } _PUBLIC_ int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key) { tdb_trace_1rec(tdb, "tdb_chainunlock", key); return tdb_unlock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK); } _PUBLIC_ int tdb_chainlock_read(struct tdb_context *tdb, TDB_DATA key) { int ret; ret = tdb_lock(tdb, BUCKET(tdb->hash_fn(&key)), F_RDLCK); tdb_trace_1rec(tdb, "tdb_chainlock_read", key); return ret; } _PUBLIC_ int tdb_chainunlock_read(struct tdb_context *tdb, TDB_DATA key) { tdb_trace_1rec(tdb, "tdb_chainunlock_read", key); return tdb_unlock(tdb, BUCKET(tdb->hash_fn(&key)), F_RDLCK); } _PUBLIC_ int tdb_chainlock_read_nonblock(struct tdb_context *tdb, TDB_DATA key) { int ret = tdb_lock_nonblock(tdb, BUCKET(tdb->hash_fn(&key)), F_RDLCK); tdb_trace_1rec_ret(tdb, "tdb_chainlock_read_nonblock", key, ret); return ret; } /* record lock stops delete underneath */ int tdb_lock_record(struct tdb_context *tdb, tdb_off_t off) { if (tdb->allrecord_lock.count) { return 0; } return off ? tdb_brlock(tdb, F_RDLCK, off, 1, TDB_LOCK_WAIT) : 0; } /* Write locks override our own fcntl readlocks, so check it here. Note this is meant to be F_SETLK, *not* F_SETLKW, as it's not an error to fail to get the lock here. */ int tdb_write_lock_record(struct tdb_context *tdb, tdb_off_t off) { struct tdb_traverse_lock *i; if (tdb == NULL) { return -1; } for (i = &tdb->travlocks; i; i = i->next) if (i->off == off) return -1; if (tdb->allrecord_lock.count) { if (tdb->allrecord_lock.ltype == F_WRLCK) { return 0; } return -1; } return tdb_brlock(tdb, F_WRLCK, off, 1, TDB_LOCK_NOWAIT|TDB_LOCK_PROBE); } int tdb_write_unlock_record(struct tdb_context *tdb, tdb_off_t off) { if (tdb->allrecord_lock.count) { return 0; } return tdb_brunlock(tdb, F_WRLCK, off, 1); } /* fcntl locks don't stack: avoid unlocking someone else's */ int tdb_unlock_record(struct tdb_context *tdb, tdb_off_t off) { struct tdb_traverse_lock *i; uint32_t count = 0; if (tdb->allrecord_lock.count) { return 0; } if (off == 0) return 0; for (i = &tdb->travlocks; i; i = i->next) if (i->off == off) count++; return (count == 1 ? tdb_brunlock(tdb, F_RDLCK, off, 1) : 0); } bool tdb_have_extra_locks(struct tdb_context *tdb) { unsigned int extra = tdb->num_lockrecs; /* A transaction holds the lock for all records. */ if (!tdb->transaction && tdb->allrecord_lock.count) { return true; } /* We always hold the active lock if CLEAR_IF_FIRST. 
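 * (The active lock is the one byte at ACTIVE_LOCK, taken at open time for TDB_CLEAR_IF_FIRST databases, hence it is discounted from 'extra' here.)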
*/ if (find_nestlock(tdb, ACTIVE_LOCK)) { extra--; } /* In a transaction, we expect to hold the transaction lock */ if (tdb->transaction && find_nestlock(tdb, TRANSACTION_LOCK)) { extra--; } return extra; } /* The transaction code uses this to remove all locks. */ void tdb_release_transaction_locks(struct tdb_context *tdb) { int i; unsigned int active = 0; if (tdb->allrecord_lock.count != 0) { tdb_allrecord_unlock(tdb, tdb->allrecord_lock.ltype, false); tdb->allrecord_lock.count = 0; } for (i=0;i<tdb->num_lockrecs;i++) { struct tdb_lock_type *lck = &tdb->lockrecs[i]; /* Don't release the active lock! Copy it to first entry. */ if (lck->off == ACTIVE_LOCK) { tdb->lockrecs[active++] = *lck; } else { tdb_brunlock(tdb, lck->ltype, lck->off, 1); } } tdb->num_lockrecs = active; } /* Following functions are added specifically to support CTDB. */ /* Don't do actual fcntl locking, just mark tdb locked */ int tdb_transaction_write_lock_mark(struct tdb_context *tdb); _PUBLIC_ int tdb_transaction_write_lock_mark(struct tdb_context *tdb) { return tdb_transaction_lock(tdb, F_WRLCK, TDB_LOCK_MARK_ONLY); } /* Don't do actual fcntl unlocking, just mark tdb unlocked */ int tdb_transaction_write_lock_unmark(struct tdb_context *tdb); _PUBLIC_ int tdb_transaction_write_lock_unmark(struct tdb_context *tdb) { return tdb_nest_unlock(tdb, TRANSACTION_LOCK, F_WRLCK, true); } tdb-1.4.2/common/mutex.c0000660000000000000000000005501013100601766015022 0ustar rootroot00000000000000/* Unix SMB/CIFS implementation. trivial database library Copyright (C) Volker Lendecke 2012,2013 Copyright (C) Stefan Metzmacher 2013,2014 Copyright (C) Michael Adam 2014 ** NOTE! The following LGPL license applies to the tdb ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "tdb_private.h" #include "system/threads.h" #ifdef USE_TDB_MUTEX_LOCKING /* * If we run with mutexes, we store the "struct tdb_mutexes" at the * beginning of the file. We store an additional tdb_header right * beyond the mutex area, page aligned. All the offsets within the tdb * are relative to the area behind the mutex area. tdb->map_ptr points * behind the mmap area as well, so the read and write path in the * mutex case can remain unchanged. * * Early in the mutex development the mutexes were placed between the hash * chain pointers and the real tdb data. This had two drawbacks: First, it * made pointer calculations more complex. Second, we had to mmap the mutex * area twice. One was the normal map_ptr in the tdb. This frequently changed * from within tdb_oob. At least the Linux glibc robust mutex code assumes * constant pointers in memory, so a constantly changing mmap area destroys * the mutex list. So we had to mmap the first bytes of the file with a second * mmap call. With that scheme, very weird errors happened that could be * easily fixed by doing the mutex mmap in a second file.
It seemed that * mapping the same memory area twice does not end up in accessing the same * physical page, looking at the mutexes in gdb it seemed that old data showed * up after some re-mapping. To avoid a separate mutex file, the code now puts * the real content of the tdb file after the mutex area. This way we do not * have overlapping mmap areas, the mutex area is mmapped once and not * changed, the tdb data area's mmap is constantly changed but does not * overlap. */ struct tdb_mutexes { struct tdb_header hdr; /* protect allrecord_lock */ pthread_mutex_t allrecord_mutex; /* * F_UNLCK: free, * F_RDLCK: shared, * F_WRLCK: exclusive */ short int allrecord_lock; /* * Index 0 is the freelist mutex, followed by * one mutex per hashchain. */ pthread_mutex_t hashchains[1]; }; bool tdb_have_mutexes(struct tdb_context *tdb) { return ((tdb->feature_flags & TDB_FEATURE_FLAG_MUTEX) != 0); } size_t tdb_mutex_size(struct tdb_context *tdb) { size_t mutex_size; if (!tdb_have_mutexes(tdb)) { return 0; } mutex_size = sizeof(struct tdb_mutexes); mutex_size += tdb->hash_size * sizeof(pthread_mutex_t); return TDB_ALIGN(mutex_size, tdb->page_size); } /* * Get the index for a chain mutex */ static bool tdb_mutex_index(struct tdb_context *tdb, off_t off, off_t len, unsigned *idx) { /* * Weird but true: We fcntl lock 1 byte at an offset 4 bytes before * the 4 bytes of the freelist start and the hash chain that is about * to be locked. See lock_offset() where the freelist is -1 vs the * "+1" in TDB_HASH_TOP(). Because the mutex array is represented in * the tdb file itself as data, we need to adjust the offset here. */ const off_t freelist_lock_ofs = FREELIST_TOP - sizeof(tdb_off_t); if (!tdb_have_mutexes(tdb)) { return false; } if (len != 1) { /* Possibly the allrecord lock */ return false; } if (off < freelist_lock_ofs) { /* One of the special locks */ return false; } if (tdb->hash_size == 0) { /* tdb not initialized yet, called from tdb_open_ex() */ return false; } if (off >= TDB_DATA_START(tdb->hash_size)) { /* Single record lock from traverses */ return false; } /* * Now we know it's a freelist or hash chain lock. Those are always 4 * byte aligned. Paranoia check. */ if ((off % sizeof(tdb_off_t)) != 0) { abort(); } /* * Re-index the fcntl offset into an offset into the mutex array */ off -= freelist_lock_ofs; /* rebase to index 0 */ off /= sizeof(tdb_off_t); /* 0 for freelist 1-n for hashchain */ *idx = off; return true; } static bool tdb_have_mutex_chainlocks(struct tdb_context *tdb) { size_t i; for (i=0; i < tdb->num_lockrecs; i++) { bool ret; unsigned idx; ret = tdb_mutex_index(tdb, tdb->lockrecs[i].off, tdb->lockrecs[i].count, &idx); if (!ret) { continue; } if (idx == 0) { /* this is the freelist mutex */ continue; } return true; } return false; } static int chain_mutex_lock(pthread_mutex_t *m, bool waitflag) { int ret; if (waitflag) { ret = pthread_mutex_lock(m); } else { ret = pthread_mutex_trylock(m); } if (ret != EOWNERDEAD) { return ret; } /* * For chainlocks, we don't do any cleanup (yet?) */ return pthread_mutex_consistent(m); } static int allrecord_mutex_lock(struct tdb_mutexes *m, bool waitflag) { int ret; if (waitflag) { ret = pthread_mutex_lock(&m->allrecord_mutex); } else { ret = pthread_mutex_trylock(&m->allrecord_mutex); } if (ret != EOWNERDEAD) { return ret; } /* * The allrecord lock holder died. We need to reset the allrecord_lock * to F_UNLCK. This should also be the indication for * tdb_needs_recovery. 
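 * * (pthread_mutex_consistent() below is what makes a robust mutex usable again after its previous owner died holding it and EOWNERDEAD was returned.)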
*/ m->allrecord_lock = F_UNLCK; return pthread_mutex_consistent(&m->allrecord_mutex); } bool tdb_mutex_lock(struct tdb_context *tdb, int rw, off_t off, off_t len, bool waitflag, int *pret) { struct tdb_mutexes *m = tdb->mutexes; pthread_mutex_t *chain; int ret; unsigned idx; bool allrecord_ok; if (!tdb_mutex_index(tdb, off, len, &idx)) { return false; } chain = &m->hashchains[idx]; again: ret = chain_mutex_lock(chain, waitflag); if (ret == EBUSY) { ret = EAGAIN; } if (ret != 0) { errno = ret; goto fail; } if (idx == 0) { /* * This is a freelist lock, which is independent of * the allrecord lock. So we're done once we got the * freelist mutex. */ *pret = 0; return true; } if (tdb_have_mutex_chainlocks(tdb)) { /* * We can only check the allrecord lock once. If we do it with * one chain mutex locked, we will deadlock with the allrecord * locker process in the following way: We lock the first hash * chain, we check for the allrecord lock. We keep the hash * chain locked. Then the allrecord locker locks the * allrecord_mutex. It walks the list of chain mutexes, * locking them all in sequence. Meanwhile, we have the chain * mutex locked, so the allrecord locker blocks trying to lock * our chain mutex. Then we come in and try to lock the second * chain lock, which in most cases will be the freelist. We * see that the allrecord lock is locked and put ourselves on * the allrecord_mutex. This will never be signalled though * because the allrecord locker waits for us to give up the * chain lock. */ *pret = 0; return true; } /* * Check if someone has the allrecord lock: queue if so. */ allrecord_ok = false; if (m->allrecord_lock == F_UNLCK) { /* * allrecord lock not taken */ allrecord_ok = true; } if ((m->allrecord_lock == F_RDLCK) && (rw == F_RDLCK)) { /* * allrecord shared lock taken, but we only want to read */ allrecord_ok = true; } if (allrecord_ok) { *pret = 0; return true; } ret = pthread_mutex_unlock(chain); if (ret != 0) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock" "(chain_mutex) failed: %s\n", strerror(ret))); errno = ret; goto fail; } ret = allrecord_mutex_lock(m, waitflag); if (ret == EBUSY) { ret = EAGAIN; } if (ret != 0) { if (waitflag || (ret != EAGAIN)) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_%slock" "(allrecord_mutex) failed: %s\n", waitflag ?
"" : "try_", strerror(ret))); } errno = ret; goto fail; } ret = pthread_mutex_unlock(&m->allrecord_mutex); if (ret != 0) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock" "(allrecord_mutex) failed: %s\n", strerror(ret))); errno = ret; goto fail; } goto again; fail: *pret = -1; return true; } bool tdb_mutex_unlock(struct tdb_context *tdb, int rw, off_t off, off_t len, int *pret) { struct tdb_mutexes *m = tdb->mutexes; pthread_mutex_t *chain; int ret; unsigned idx; if (!tdb_mutex_index(tdb, off, len, &idx)) { return false; } chain = &m->hashchains[idx]; ret = pthread_mutex_unlock(chain); if (ret == 0) { *pret = 0; return true; } errno = ret; *pret = -1; return true; } int tdb_mutex_allrecord_lock(struct tdb_context *tdb, int ltype, enum tdb_lock_flags flags) { struct tdb_mutexes *m = tdb->mutexes; int ret; uint32_t i; bool waitflag = (flags & TDB_LOCK_WAIT); int saved_errno; if (tdb->flags & TDB_NOLOCK) { return 0; } if (flags & TDB_LOCK_MARK_ONLY) { return 0; } ret = allrecord_mutex_lock(m, waitflag); if (!waitflag && (ret == EBUSY)) { errno = EAGAIN; tdb->ecode = TDB_ERR_LOCK; return -1; } if (ret != 0) { if (!(flags & TDB_LOCK_PROBE)) { TDB_LOG((tdb, TDB_DEBUG_TRACE, "allrecord_mutex_lock() failed: %s\n", strerror(ret))); } tdb->ecode = TDB_ERR_LOCK; return -1; } if (m->allrecord_lock != F_UNLCK) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "allrecord_lock == %d\n", (int)m->allrecord_lock)); goto fail_unlock_allrecord_mutex; } m->allrecord_lock = (ltype == F_RDLCK) ? F_RDLCK : F_WRLCK; for (i=0; ihash_size; i++) { /* ignore hashchains[0], the freelist */ pthread_mutex_t *chain = &m->hashchains[i+1]; ret = chain_mutex_lock(chain, waitflag); if (!waitflag && (ret == EBUSY)) { errno = EAGAIN; goto fail_unroll_allrecord_lock; } if (ret != 0) { if (!(flags & TDB_LOCK_PROBE)) { TDB_LOG((tdb, TDB_DEBUG_TRACE, "chain_mutex_lock() failed: %s\n", strerror(ret))); } errno = ret; goto fail_unroll_allrecord_lock; } ret = pthread_mutex_unlock(chain); if (ret != 0) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock" "(chainlock) failed: %s\n", strerror(ret))); errno = ret; goto fail_unroll_allrecord_lock; } } /* * We leave this routine with m->allrecord_mutex locked */ return 0; fail_unroll_allrecord_lock: m->allrecord_lock = F_UNLCK; fail_unlock_allrecord_mutex: saved_errno = errno; ret = pthread_mutex_unlock(&m->allrecord_mutex); if (ret != 0) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock" "(allrecord_mutex) failed: %s\n", strerror(ret))); } errno = saved_errno; tdb->ecode = TDB_ERR_LOCK; return -1; } int tdb_mutex_allrecord_upgrade(struct tdb_context *tdb) { struct tdb_mutexes *m = tdb->mutexes; int ret; uint32_t i; if (tdb->flags & TDB_NOLOCK) { return 0; } /* * Our only caller tdb_allrecord_upgrade() * garantees that we already own the allrecord lock. * * Which means m->allrecord_mutex is still locked by us. 
	 */

	if (m->allrecord_lock != F_RDLCK) {
		tdb->ecode = TDB_ERR_LOCK;
		TDB_LOG((tdb, TDB_DEBUG_FATAL, "allrecord_lock == %d\n",
			 (int)m->allrecord_lock));
		return -1;
	}

	m->allrecord_lock = F_WRLCK;

	for (i=0; i<tdb->hash_size; i++) {

		/* ignore hashchains[0], the freelist */
		pthread_mutex_t *chain = &m->hashchains[i+1];

		ret = chain_mutex_lock(chain, true);
		if (ret != 0) {
			TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_lock"
				 "(chainlock) failed: %s\n", strerror(ret)));
			goto fail_unroll_allrecord_lock;
		}

		ret = pthread_mutex_unlock(chain);
		if (ret != 0) {
			TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock"
				 "(chainlock) failed: %s\n", strerror(ret)));
			goto fail_unroll_allrecord_lock;
		}
	}

	return 0;

fail_unroll_allrecord_lock:
	m->allrecord_lock = F_RDLCK;
	tdb->ecode = TDB_ERR_LOCK;
	return -1;
}

void tdb_mutex_allrecord_downgrade(struct tdb_context *tdb)
{
	struct tdb_mutexes *m = tdb->mutexes;

	/*
	 * Our only caller tdb_allrecord_upgrade() (in the error case)
	 * guarantees that we already own the allrecord lock.
	 *
	 * Which means m->allrecord_mutex is still locked by us.
	 */

	if (m->allrecord_lock != F_WRLCK) {
		TDB_LOG((tdb, TDB_DEBUG_FATAL, "allrecord_lock == %d\n",
			 (int)m->allrecord_lock));
		return;
	}

	m->allrecord_lock = F_RDLCK;
	return;
}

int tdb_mutex_allrecord_unlock(struct tdb_context *tdb)
{
	struct tdb_mutexes *m = tdb->mutexes;
	short old;
	int ret;

	if (tdb->flags & TDB_NOLOCK) {
		return 0;
	}

	/*
	 * Our only callers tdb_allrecord_unlock() and
	 * tdb_allrecord_lock() (in the error path)
	 * guarantee that we already own the allrecord lock.
	 *
	 * Which means m->allrecord_mutex is still locked by us.
	 */

	if ((m->allrecord_lock != F_RDLCK) && (m->allrecord_lock != F_WRLCK)) {
		TDB_LOG((tdb, TDB_DEBUG_FATAL, "allrecord_lock == %d\n",
			 (int)m->allrecord_lock));
		return -1;
	}

	old = m->allrecord_lock;
	m->allrecord_lock = F_UNLCK;

	ret = pthread_mutex_unlock(&m->allrecord_mutex);
	if (ret != 0) {
		m->allrecord_lock = old;
		TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock"
			 "(allrecord_mutex) failed: %s\n", strerror(ret)));
		return -1;
	}
	return 0;
}

int tdb_mutex_init(struct tdb_context *tdb)
{
	struct tdb_mutexes *m;
	pthread_mutexattr_t ma;
	int i, ret;

	ret = tdb_mutex_mmap(tdb);
	if (ret == -1) {
		return -1;
	}
	m = tdb->mutexes;

	ret = pthread_mutexattr_init(&ma);
	if (ret != 0) {
		goto fail_munmap;
	}
	ret = pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_ERRORCHECK);
	if (ret != 0) {
		goto fail;
	}
	ret = pthread_mutexattr_setpshared(&ma, PTHREAD_PROCESS_SHARED);
	if (ret != 0) {
		goto fail;
	}
	ret = pthread_mutexattr_setrobust(&ma, PTHREAD_MUTEX_ROBUST);
	if (ret != 0) {
		goto fail;
	}
	for (i=0; i<tdb->hash_size+1; i++) {
		pthread_mutex_t *chain = &m->hashchains[i];

		ret = pthread_mutex_init(chain, &ma);
		if (ret != 0) {
			goto fail;
		}
	}

	m->allrecord_lock = F_UNLCK;

	ret = pthread_mutex_init(&m->allrecord_mutex, &ma);
	if (ret != 0) {
		goto fail;
	}
	ret = 0;
fail:
	pthread_mutexattr_destroy(&ma);
fail_munmap:

	if (ret == 0) {
		return 0;
	}

	tdb_mutex_munmap(tdb);

	errno = ret;
	return -1;
}

int tdb_mutex_mmap(struct tdb_context *tdb)
{
	size_t len;
	void *ptr;

	len = tdb_mutex_size(tdb);
	if (len == 0) {
		return 0;
	}

	if (tdb->mutexes != NULL) {
		return 0;
	}

	ptr = mmap(NULL, len, PROT_READ|PROT_WRITE,
		   MAP_SHARED|MAP_FILE, tdb->fd, 0);
	if (ptr == MAP_FAILED) {
		return -1;
	}
	tdb->mutexes = (struct tdb_mutexes *)ptr;

	return 0;
}

int tdb_mutex_munmap(struct tdb_context *tdb)
{
	size_t len;
	int ret;

	len = tdb_mutex_size(tdb);
	if (len == 0) {
		return 0;
	}

	ret = munmap(tdb->mutexes, len);
	if (ret == -1) {
		return -1;
	}
	tdb->mutexes = NULL;

	return 0;
}

static bool tdb_mutex_locking_cached;
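/*
 * Probe whether the runtime provides working mutexes. The double lock
 * of the ERRORCHECK mutex below must fail with EDEADLK; a stub pthread
 * implementation (e.g. a libc built without a real threading library)
 * would return 0 twice. The result is computed once and cached in
 * tdb_mutex_locking_cached.
 */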
static bool tdb_mutex_locking_supported(void) { pthread_mutexattr_t ma; pthread_mutex_t m; int ret; static bool initialized; if (initialized) { return tdb_mutex_locking_cached; } initialized = true; ret = pthread_mutexattr_init(&ma); if (ret != 0) { return false; } ret = pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_ERRORCHECK); if (ret != 0) { goto cleanup_ma; } ret = pthread_mutexattr_setpshared(&ma, PTHREAD_PROCESS_SHARED); if (ret != 0) { goto cleanup_ma; } ret = pthread_mutexattr_setrobust(&ma, PTHREAD_MUTEX_ROBUST); if (ret != 0) { goto cleanup_ma; } ret = pthread_mutex_init(&m, &ma); if (ret != 0) { goto cleanup_ma; } ret = pthread_mutex_lock(&m); if (ret != 0) { goto cleanup_m; } /* * This makes sure we have real mutexes * from a threading library instead of just * stubs from libc. */ ret = pthread_mutex_lock(&m); if (ret != EDEADLK) { goto cleanup_lock; } ret = pthread_mutex_unlock(&m); if (ret != 0) { goto cleanup_m; } tdb_mutex_locking_cached = true; goto cleanup_m; cleanup_lock: pthread_mutex_unlock(&m); cleanup_m: pthread_mutex_destroy(&m); cleanup_ma: pthread_mutexattr_destroy(&ma); return tdb_mutex_locking_cached; } static void (*tdb_robust_mutext_old_handler)(int) = SIG_ERR; static pid_t tdb_robust_mutex_pid = -1; static bool tdb_robust_mutex_setup_sigchild(void (*handler)(int), void (**p_old_handler)(int)) { #ifdef HAVE_SIGACTION struct sigaction act; struct sigaction oldact; memset(&act, '\0', sizeof(act)); act.sa_handler = handler; #ifdef SA_RESTART act.sa_flags = SA_RESTART; #endif sigemptyset(&act.sa_mask); sigaddset(&act.sa_mask, SIGCHLD); sigaction(SIGCHLD, &act, &oldact); if (p_old_handler) { *p_old_handler = oldact.sa_handler; } return true; #else /* !HAVE_SIGACTION */ return false; #endif } static void tdb_robust_mutex_handler(int sig) { pid_t child_pid = tdb_robust_mutex_pid; if (child_pid != -1) { pid_t pid; pid = waitpid(child_pid, NULL, WNOHANG); if (pid == -1) { switch (errno) { case ECHILD: tdb_robust_mutex_pid = -1; return; default: return; } } if (pid == child_pid) { tdb_robust_mutex_pid = -1; return; } } if (tdb_robust_mutext_old_handler == SIG_DFL) { return; } if (tdb_robust_mutext_old_handler == SIG_IGN) { return; } if (tdb_robust_mutext_old_handler == SIG_ERR) { return; } tdb_robust_mutext_old_handler(sig); } static void tdb_robust_mutex_wait_for_child(pid_t *child_pid) { int options = WNOHANG; if (*child_pid == -1) { return; } while (tdb_robust_mutex_pid > 0) { pid_t pid; /* * First we try with WNOHANG, as the process might not exist * anymore. Once we've sent SIGKILL we block waiting for the * exit. 
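		 *
		 * Note that our SIGCHLD handler may already have reaped
		 * the child; waitpid() then fails with ECHILD and we
		 * simply stop waiting.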
		 */
		pid = waitpid(*child_pid, NULL, options);
		if (pid == -1) {
			if (errno == EINTR) {
				continue;
			} else if (errno == ECHILD) {
				break;
			} else {
				abort();
			}
		}
		if (pid == *child_pid) {
			break;
		}

		kill(*child_pid, SIGKILL);
		options = 0;
	}

	tdb_robust_mutex_pid = -1;
	*child_pid = -1;
}

_PUBLIC_ bool tdb_runtime_check_for_robust_mutexes(void)
{
	void *ptr = NULL;
	pthread_mutex_t *m = NULL;
	pthread_mutexattr_t ma;
	int ret = 1;
	int pipe_down[2] = { -1, -1 };
	int pipe_up[2] = { -1, -1 };
	ssize_t nread;
	char c = 0;
	bool ok;
	static bool initialized;
	pid_t saved_child_pid = -1;
	bool cleanup_ma = false;

	if (initialized) {
		return tdb_mutex_locking_cached;
	}

	initialized = true;

	ok = tdb_mutex_locking_supported();
	if (!ok) {
		return false;
	}

	tdb_mutex_locking_cached = false;

	ptr = mmap(NULL, sizeof(pthread_mutex_t), PROT_READ|PROT_WRITE,
		   MAP_SHARED|MAP_ANON, -1 /* fd */, 0);
	if (ptr == MAP_FAILED) {
		return false;
	}

	ret = pipe(pipe_down);
	if (ret != 0) {
		goto cleanup;
	}
	ret = pipe(pipe_up);
	if (ret != 0) {
		goto cleanup;
	}

	ret = pthread_mutexattr_init(&ma);
	if (ret != 0) {
		goto cleanup;
	}
	cleanup_ma = true;
	ret = pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_ERRORCHECK);
	if (ret != 0) {
		goto cleanup;
	}
	ret = pthread_mutexattr_setpshared(&ma, PTHREAD_PROCESS_SHARED);
	if (ret != 0) {
		goto cleanup;
	}
	ret = pthread_mutexattr_setrobust(&ma, PTHREAD_MUTEX_ROBUST);
	if (ret != 0) {
		goto cleanup;
	}
	ret = pthread_mutex_init(ptr, &ma);
	if (ret != 0) {
		goto cleanup;
	}
	m = (pthread_mutex_t *)ptr;

	if (tdb_robust_mutex_setup_sigchild(tdb_robust_mutex_handler,
			&tdb_robust_mutext_old_handler) == false) {
		goto cleanup;
	}

	tdb_robust_mutex_pid = fork();
	saved_child_pid = tdb_robust_mutex_pid;
	if (tdb_robust_mutex_pid == 0) {
		size_t nwritten;
		close(pipe_down[1]);
		close(pipe_up[0]);
		ret = pthread_mutex_lock(m);
		nwritten = write(pipe_up[1], &ret, sizeof(ret));
		if (nwritten != sizeof(ret)) {
			_exit(1);
		}
		if (ret != 0) {
			_exit(1);
		}
		nread = read(pipe_down[0], &c, 1);
		if (nread != 1) {
			_exit(1);
		}
		/* leave locked */
		_exit(0);
	}
	if (tdb_robust_mutex_pid == -1) {
		goto cleanup;
	}
	close(pipe_down[0]);
	pipe_down[0] = -1;
	close(pipe_up[1]);
	pipe_up[1] = -1;

	nread = read(pipe_up[0], &ret, sizeof(ret));
	if (nread != sizeof(ret)) {
		goto cleanup;
	}

	ret = pthread_mutex_trylock(m);
	if (ret != EBUSY) {
		if (ret == 0) {
			pthread_mutex_unlock(m);
		}
		goto cleanup;
	}

	if (write(pipe_down[1], &c, 1) != 1) {
		goto cleanup;
	}

	nread = read(pipe_up[0], &c, 1);
	if (nread != 0) {
		goto cleanup;
	}

	tdb_robust_mutex_wait_for_child(&saved_child_pid);

	ret = pthread_mutex_trylock(m);
	if (ret != EOWNERDEAD) {
		if (ret == 0) {
			pthread_mutex_unlock(m);
		}
		goto cleanup;
	}

	ret = pthread_mutex_consistent(m);
	if (ret != 0) {
		goto cleanup;
	}

	ret = pthread_mutex_trylock(m);
	if (ret != EDEADLK && ret != EBUSY) {
		pthread_mutex_unlock(m);
		goto cleanup;
	}

	ret = pthread_mutex_unlock(m);
	if (ret != 0) {
		goto cleanup;
	}

	tdb_mutex_locking_cached = true;

cleanup:
	/*
	 * Note that we don't reset the signal handler, we just reset
	 * tdb_robust_mutex_pid to -1. This is ok as this code path is only
	 * called once per process.
	 *
	 * Leaving our signal handler avoids races with other threads
	 * potentially setting up their SIGCHLD handlers.
	 *
	 * The worst thing that can happen is that the other newer signal
	 * handler will get the SIGCHLD signal for our child and/or reap the
	 * child with a wait() function. tdb_robust_mutex_wait_for_child()
	 * handles the case where waitpid returns ECHILD.
	 */
	tdb_robust_mutex_wait_for_child(&saved_child_pid);

	if (m != NULL) {
		pthread_mutex_destroy(m);
	}
	if (cleanup_ma) {
		pthread_mutexattr_destroy(&ma);
	}
	if (pipe_down[0] != -1) {
		close(pipe_down[0]);
	}
	if (pipe_down[1] != -1) {
		close(pipe_down[1]);
	}
	if (pipe_up[0] != -1) {
		close(pipe_up[0]);
	}
	if (pipe_up[1] != -1) {
		close(pipe_up[1]);
	}
	if (ptr != NULL) {
		munmap(ptr, sizeof(pthread_mutex_t));
	}

	return tdb_mutex_locking_cached;
}

#else

size_t tdb_mutex_size(struct tdb_context *tdb)
{
	return 0;
}

bool tdb_have_mutexes(struct tdb_context *tdb)
{
	return false;
}

int tdb_mutex_allrecord_lock(struct tdb_context *tdb, int ltype,
			     enum tdb_lock_flags flags)
{
	tdb->ecode = TDB_ERR_LOCK;
	return -1;
}

int tdb_mutex_allrecord_unlock(struct tdb_context *tdb)
{
	return -1;
}

int tdb_mutex_allrecord_upgrade(struct tdb_context *tdb)
{
	tdb->ecode = TDB_ERR_LOCK;
	return -1;
}

void tdb_mutex_allrecord_downgrade(struct tdb_context *tdb)
{
	return;
}

int tdb_mutex_mmap(struct tdb_context *tdb)
{
	errno = ENOSYS;
	return -1;
}

int tdb_mutex_munmap(struct tdb_context *tdb)
{
	errno = ENOSYS;
	return -1;
}

int tdb_mutex_init(struct tdb_context *tdb)
{
	errno = ENOSYS;
	return -1;
}

_PUBLIC_ bool tdb_runtime_check_for_robust_mutexes(void)
{
	return false;
}

#endif
tdb-1.4.2/common/open.c0000660000000000000000000006055013527011454014630 0ustar rootroot00000000000000
/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Andrew Tridgell 1999-2005
   Copyright (C) Paul `Rusty' Russell 2000
   Copyright (C) Jeremy Allison 2000-2003

     ** NOTE! The following LGPL license applies to the tdb
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#include "tdb_private.h"

/* all contexts, to ensure no double-opens (fcntl locks don't nest!) */
static struct tdb_context *tdbs = NULL;

/* We use two hashes to double-check they're using the right hash function. */
void tdb_header_hash(struct tdb_context *tdb,
		     uint32_t *magic1_hash,
		     uint32_t *magic2_hash)
{
	TDB_DATA hash_key;
	uint32_t tdb_magic = TDB_MAGIC;

	hash_key.dptr = discard_const_p(unsigned char, TDB_MAGIC_FOOD);
	hash_key.dsize = sizeof(TDB_MAGIC_FOOD);
	*magic1_hash = tdb->hash_fn(&hash_key);

	hash_key.dptr = (unsigned char *)CONVERT(tdb_magic);
	hash_key.dsize = sizeof(tdb_magic);
	*magic2_hash = tdb->hash_fn(&hash_key);

	/* Make sure at least one hash is non-zero!
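	 * An older tdb, created before the magic hash fields existed,
	 * carries zero in both fields; check_header_hash() treats the
	 * 0/0 pair as "old database, use the old hash". Forcing a bit
	 * here keeps freshly created databases unambiguous.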
	 */
	if (*magic1_hash == 0 && *magic2_hash == 0)
		*magic1_hash = 1;
}

/* initialise a new database with a specified hash size */
static int tdb_new_database(struct tdb_context *tdb, struct tdb_header *header,
			    int hash_size)
{
	struct tdb_header *newdb;
	size_t size;
	int ret = -1;

	/* We make it up in memory, then write it out if not internal */
	size = sizeof(struct tdb_header) + (hash_size+1)*sizeof(tdb_off_t);
	if (!(newdb = (struct tdb_header *)calloc(size, 1))) {
		tdb->ecode = TDB_ERR_OOM;
		return -1;
	}

	/* Fill in the header */
	newdb->version = TDB_VERSION;
	newdb->hash_size = hash_size;

	tdb_header_hash(tdb, &newdb->magic1_hash, &newdb->magic2_hash);

	/* Make sure older tdbs (which don't check the magic hash fields)
	 * will refuse to open this TDB. */
	if (tdb->flags & TDB_INCOMPATIBLE_HASH)
		newdb->rwlocks = TDB_HASH_RWLOCK_MAGIC;

	/*
	 * We create a tdb with TDB_FEATURE_FLAG_MUTEX support,
	 * the flag combination and runtime feature checks
	 * are done by the caller already.
	 */
	if (tdb->flags & TDB_MUTEX_LOCKING) {
		newdb->feature_flags |= TDB_FEATURE_FLAG_MUTEX;
	}

	/*
	 * If we have any features we add the FEATURE_FLAG_MAGIC, overwriting the
	 * TDB_HASH_RWLOCK_MAGIC above.
	 */
	if (newdb->feature_flags != 0) {
		newdb->rwlocks = TDB_FEATURE_FLAG_MAGIC;
	}

	/*
	 * It's required for some following code paths
	 * to have the fields on 'tdb' up-to-date.
	 *
	 * E.g. tdb_mutex_size() requires it
	 */
	tdb->feature_flags = newdb->feature_flags;
	tdb->hash_size = newdb->hash_size;

	if (tdb->flags & TDB_INTERNAL) {
		tdb->map_size = size;
		tdb->map_ptr = (char *)newdb;
		memcpy(header, newdb, sizeof(*header));
		/* Convert the `ondisk' version if asked. */
		CONVERT(*newdb);
		return 0;
	}
	if (lseek(tdb->fd, 0, SEEK_SET) == -1)
		goto fail;

	if (ftruncate(tdb->fd, 0) == -1)
		goto fail;

	if (newdb->feature_flags & TDB_FEATURE_FLAG_MUTEX) {
		newdb->mutex_size = tdb_mutex_size(tdb);
		tdb->hdr_ofs = newdb->mutex_size;
	}

	/* This creates an endian-converted header, as if read from disk */
	CONVERT(*newdb);
	memcpy(header, newdb, sizeof(*header));
	/* Don't endian-convert the magic food! */
	memcpy(newdb->magic_food, TDB_MAGIC_FOOD, strlen(TDB_MAGIC_FOOD)+1);

	if (!tdb_write_all(tdb->fd, newdb, size))
		goto fail;

	if (newdb->feature_flags & TDB_FEATURE_FLAG_MUTEX) {

		/*
		 * Now we init the mutex area
		 * followed by a second header.
		 */

		ret = ftruncate(
			tdb->fd,
			newdb->mutex_size + sizeof(struct tdb_header));
		if (ret == -1) {
			goto fail;
		}
		ret = tdb_mutex_init(tdb);
		if (ret == -1) {
			goto fail;
		}

		/*
		 * Write a second header behind the mutexes. That's the area
		 * that will be mmap'ed.
		 */
		ret = lseek(tdb->fd, newdb->mutex_size, SEEK_SET);
		if (ret == -1) {
			goto fail;
		}
		if (!tdb_write_all(tdb->fd, newdb, size)) {
			goto fail;
		}
	}

	ret = 0;
fail:
	SAFE_FREE(newdb);
	return ret;
}

static int tdb_already_open(dev_t device, ino_t ino)
{
	struct tdb_context *i;

	for (i = tdbs; i; i = i->next) {
		if (i->device == device && i->inode == ino) {
			return 1;
		}
	}

	return 0;
}

/* open the database, creating it if necessary

   The open_flags and mode are passed straight to the open call on the
   database file. A flags value of O_WRONLY is invalid. The hash size
   is advisory, use zero for a default value.

   Return is NULL on error, in which case errno is also set. Don't
   try to call tdb_error or tdb_errname, just do strerror(errno).

   @param name may be NULL for internal databases.
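
   A minimal usage sketch (illustrative only; file name and mode are
   arbitrary):

     struct tdb_context *db;

     db = tdb_open("example.tdb", 0, TDB_DEFAULT, O_RDWR|O_CREAT, 0600);
     if (db == NULL) {
         perror("tdb_open");
     }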
*/ _PUBLIC_ struct tdb_context *tdb_open(const char *name, int hash_size, int tdb_flags, int open_flags, mode_t mode) { return tdb_open_ex(name, hash_size, tdb_flags, open_flags, mode, NULL, NULL); } /* a default logging function */ static void null_log_fn(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...) PRINTF_ATTRIBUTE(3, 4); static void null_log_fn(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...) { } static bool check_header_hash(struct tdb_context *tdb, struct tdb_header *header, bool default_hash, uint32_t *m1, uint32_t *m2) { tdb_header_hash(tdb, m1, m2); if (header->magic1_hash == *m1 && header->magic2_hash == *m2) { return true; } /* If they explicitly set a hash, always respect it. */ if (!default_hash) return false; /* Otherwise, try the other inbuilt hash. */ if (tdb->hash_fn == tdb_old_hash) tdb->hash_fn = tdb_jenkins_hash; else tdb->hash_fn = tdb_old_hash; return check_header_hash(tdb, header, false, m1, m2); } static bool tdb_mutex_open_ok(struct tdb_context *tdb, const struct tdb_header *header) { if (tdb->flags & TDB_NOLOCK) { /* * We don't look at locks, so it does not matter to have a * compatible mutex implementation. Allow the open. */ return true; } if (!(tdb->flags & TDB_MUTEX_LOCKING)) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_mutex_open_ok[%s]: " "Can use mutexes only with " "MUTEX_LOCKING or NOLOCK\n", tdb->name)); return false; } if (tdb_mutex_size(tdb) != header->mutex_size) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_mutex_open_ok[%s]: " "Mutex size changed from %"PRIu32" to %zu\n.", tdb->name, header->mutex_size, tdb_mutex_size(tdb))); return false; } return true; } _PUBLIC_ struct tdb_context *tdb_open_ex(const char *name, int hash_size, int tdb_flags, int open_flags, mode_t mode, const struct tdb_logging_context *log_ctx, tdb_hash_func hash_fn) { int orig_errno = errno; struct tdb_header header = { .version = 0, }; struct tdb_context *tdb; struct stat st; int rev = 0; bool locked = false; unsigned char *vp; uint32_t vertest; unsigned v; const char *hash_alg; uint32_t magic1, magic2; int ret; if (!(tdb = (struct tdb_context *)calloc(1, sizeof *tdb))) { /* Can't log this */ errno = ENOMEM; goto fail; } tdb_io_init(tdb); if (tdb_flags & TDB_INTERNAL) { tdb_flags |= TDB_INCOMPATIBLE_HASH; } if (tdb_flags & TDB_MUTEX_LOCKING) { tdb_flags |= TDB_INCOMPATIBLE_HASH; } tdb->fd = -1; #ifdef TDB_TRACE tdb->tracefd = -1; #endif tdb->name = NULL; tdb->map_ptr = NULL; tdb->flags = tdb_flags; tdb->open_flags = open_flags; if (log_ctx) { tdb->log = *log_ctx; } else { tdb->log.log_fn = null_log_fn; tdb->log.log_private = NULL; } if (name == NULL && (tdb_flags & TDB_INTERNAL)) { name = "__TDB_INTERNAL__"; } if (name == NULL) { tdb->name = discard_const_p(char, "__NULL__"); TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: called with name == NULL\n")); tdb->name = NULL; errno = EINVAL; goto fail; } /* now make a copy of the name, as the caller memory might go away */ if (!(tdb->name = (char *)strdup(name))) { /* * set the name as the given string, so that tdb_name() will * work in case of an error. */ tdb->name = discard_const_p(char, name); TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: can't strdup(%s)\n", name)); tdb->name = NULL; errno = ENOMEM; goto fail; } if (hash_fn) { tdb->hash_fn = hash_fn; hash_alg = "the user defined"; } else { /* This controls what we use when creating a tdb. 
*/ if (tdb->flags & TDB_INCOMPATIBLE_HASH) { tdb->hash_fn = tdb_jenkins_hash; } else { tdb->hash_fn = tdb_old_hash; } hash_alg = "either default"; } /* cache the page size */ tdb->page_size = getpagesize(); if (tdb->page_size <= 0) { tdb->page_size = 0x2000; } tdb->max_dead_records = (tdb_flags & TDB_VOLATILE) ? 5 : 0; if ((open_flags & O_ACCMODE) == O_WRONLY) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: can't open tdb %s write-only\n", name)); errno = EINVAL; goto fail; } if (hash_size == 0) hash_size = DEFAULT_HASH_SIZE; if ((open_flags & O_ACCMODE) == O_RDONLY) { tdb->read_only = 1; /* read only databases don't do locking or clear if first */ tdb->flags |= TDB_NOLOCK; tdb->flags &= ~(TDB_CLEAR_IF_FIRST|TDB_MUTEX_LOCKING); } if ((tdb->flags & TDB_ALLOW_NESTING) && (tdb->flags & TDB_DISALLOW_NESTING)) { tdb->ecode = TDB_ERR_NESTING; TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: " "allow_nesting and disallow_nesting are not allowed together!")); errno = EINVAL; goto fail; } if (tdb->flags & TDB_MUTEX_LOCKING) { /* * Here we catch bugs in the callers, * the runtime check for existing tdb's comes later. */ if (tdb->flags & TDB_INTERNAL) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: " "invalid flags for %s - TDB_MUTEX_LOCKING and " "TDB_INTERNAL are not allowed together\n", name)); errno = EINVAL; goto fail; } if (tdb->flags & TDB_NOMMAP) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: " "invalid flags for %s - TDB_MUTEX_LOCKING and " "TDB_NOMMAP are not allowed together\n", name)); errno = EINVAL; goto fail; } if (tdb->read_only) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: " "invalid flags for %s - TDB_MUTEX_LOCKING " "not allowed read only\n", name)); errno = EINVAL; goto fail; } /* * The callers should have called * tdb_runtime_check_for_robust_mutexes() * before using TDB_MUTEX_LOCKING! * * This makes sure the caller understands * that the locking may behave a bit differently * than with pure fcntl locking. E.g. multiple * read locks are not supported. */ if (!tdb_runtime_check_for_robust_mutexes()) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: " "invalid flags for %s - TDB_MUTEX_LOCKING " "requires support for robust_mutexes\n", name)); errno = ENOSYS; goto fail; } } if (getenv("TDB_NO_FSYNC")) { tdb->flags |= TDB_NOSYNC; } /* * TDB_ALLOW_NESTING is the default behavior. * Note: this may change in future versions! 
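	 *
	 * Callers that must not nest transactions should therefore pass
	 * TDB_DISALLOW_NESTING explicitly rather than rely on this
	 * default.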
*/ if (!(tdb->flags & TDB_DISALLOW_NESTING)) { tdb->flags |= TDB_ALLOW_NESTING; } /* internal databases don't mmap or lock, and start off cleared */ if (tdb->flags & TDB_INTERNAL) { tdb->flags |= (TDB_NOLOCK | TDB_NOMMAP); tdb->flags &= ~TDB_CLEAR_IF_FIRST; if (tdb_new_database(tdb, &header, hash_size) != 0) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: tdb_new_database failed!")); goto fail; } tdb->hash_size = hash_size; goto internal; } if ((tdb->fd = open(name, open_flags, mode)) == -1) { TDB_LOG((tdb, TDB_DEBUG_WARNING, "tdb_open_ex: could not open file %s: %s\n", name, strerror(errno))); goto fail; /* errno set by open(2) */ } /* on exec, don't inherit the fd */ v = fcntl(tdb->fd, F_GETFD, 0); fcntl(tdb->fd, F_SETFD, v | FD_CLOEXEC); /* ensure there is only one process initialising at once */ if (tdb_nest_lock(tdb, OPEN_LOCK, F_WRLCK, TDB_LOCK_WAIT) == -1) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: failed to get open lock on %s: %s\n", name, strerror(errno))); goto fail; /* errno set by tdb_brlock */ } /* we need to zero database if we are the only one with it open */ if ((tdb_flags & TDB_CLEAR_IF_FIRST) && (!tdb->read_only)) { ret = tdb_nest_lock(tdb, ACTIVE_LOCK, F_WRLCK, TDB_LOCK_NOWAIT|TDB_LOCK_PROBE); locked = (ret == 0); if (locked) { ret = tdb_brlock(tdb, F_WRLCK, FREELIST_TOP, 0, TDB_LOCK_WAIT); if (ret == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: " "tdb_brlock failed for %s: %s\n", name, strerror(errno))); goto fail; } ret = tdb_new_database(tdb, &header, hash_size); if (ret == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: " "tdb_new_database failed for " "%s: %s\n", name, strerror(errno))); tdb_unlockall(tdb); goto fail; } ret = tdb_brunlock(tdb, F_WRLCK, FREELIST_TOP, 0); if (ret == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: " "tdb_unlockall failed for %s: %s\n", name, strerror(errno))); goto fail; } ret = lseek(tdb->fd, 0, SEEK_SET); if (ret == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: " "lseek failed for %s: %s\n", name, strerror(errno))); goto fail; } } } errno = 0; if (read(tdb->fd, &header, sizeof(header)) != sizeof(header) || strcmp(header.magic_food, TDB_MAGIC_FOOD) != 0) { if (!(open_flags & O_CREAT) || tdb_new_database(tdb, &header, hash_size) == -1) { if (errno == 0) { errno = EIO; /* ie bad format or something */ } goto fail; } rev = (tdb->flags & TDB_CONVERT); } else if (header.version != TDB_VERSION && !(rev = (header.version==TDB_BYTEREV(TDB_VERSION)))) { /* wrong version */ errno = EIO; goto fail; } vp = (unsigned char *)&header.version; vertest = (((uint32_t)vp[0]) << 24) | (((uint32_t)vp[1]) << 16) | (((uint32_t)vp[2]) << 8) | (uint32_t)vp[3]; tdb->flags |= (vertest==TDB_VERSION) ? TDB_BIGENDIAN : 0; if (!rev) tdb->flags &= ~TDB_CONVERT; else { tdb->flags |= TDB_CONVERT; tdb_convert(&header, sizeof(header)); } /* * We only use st.st_dev and st.st_ino from the raw fstat() * call, everything else needs to use tdb_fstat() in order * to skip tdb->hdr_ofs! 
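	 *
	 * (With TDB_FEATURE_FLAG_MUTEX the file starts with the mutex
	 * area of tdb->hdr_ofs bytes; tdb_fstat() skips that prefix so
	 * that logical database offsets stay stable.)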
*/ if (fstat(tdb->fd, &st) == -1) { goto fail; } tdb->device = st.st_dev; tdb->inode = st.st_ino; ZERO_STRUCT(st); if (header.rwlocks != 0 && header.rwlocks != TDB_FEATURE_FLAG_MAGIC && header.rwlocks != TDB_HASH_RWLOCK_MAGIC) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: spinlocks no longer supported\n")); errno = ENOSYS; goto fail; } if (header.hash_size == 0) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: invalid database: 0 hash_size\n")); errno = ENOSYS; goto fail; } tdb->hash_size = header.hash_size; if (header.rwlocks == TDB_FEATURE_FLAG_MAGIC) { tdb->feature_flags = header.feature_flags; } if (tdb->feature_flags & ~TDB_SUPPORTED_FEATURE_FLAGS) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: unsupported " "features in tdb %s: 0x%08x (supported: 0x%08x)\n", name, (unsigned)tdb->feature_flags, (unsigned)TDB_SUPPORTED_FEATURE_FLAGS)); errno = ENOSYS; goto fail; } if (tdb->feature_flags & TDB_FEATURE_FLAG_MUTEX) { if (!tdb_mutex_open_ok(tdb, &header)) { errno = EINVAL; goto fail; } /* * We need to remember the hdr_ofs * also for the TDB_NOLOCK case * if the current library doesn't support * mutex locking. */ tdb->hdr_ofs = header.mutex_size; if ((!(tdb_flags & TDB_CLEAR_IF_FIRST)) && (!tdb->read_only)) { /* * Open an existing mutexed tdb, but without * CLEAR_IF_FIRST. We need to initialize the * mutex array and keep the CLEAR_IF_FIRST * lock locked. */ ret = tdb_nest_lock(tdb, ACTIVE_LOCK, F_WRLCK, TDB_LOCK_NOWAIT|TDB_LOCK_PROBE); locked = (ret == 0); if (locked) { ret = tdb_mutex_init(tdb); if (ret == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: tdb_mutex_init " "failed for ""%s: %s\n", name, strerror(errno))); goto fail; } } } } if ((header.magic1_hash == 0) && (header.magic2_hash == 0)) { /* older TDB without magic hash references */ tdb->hash_fn = tdb_old_hash; } else if (!check_header_hash(tdb, &header, !hash_fn, &magic1, &magic2)) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: " "%s was not created with %s hash function we are using\n" "magic1_hash[0x%08X %s 0x%08X] " "magic2_hash[0x%08X %s 0x%08X]\n", name, hash_alg, header.magic1_hash, (header.magic1_hash == magic1) ? "==" : "!=", magic1, header.magic2_hash, (header.magic2_hash == magic2) ? "==" : "!=", magic2)); errno = EINVAL; goto fail; } /* Is it already in the open list? If so, fail. */ if (tdb_already_open(tdb->device, tdb->inode)) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: " "%s (%d,%d) is already open in this process\n", name, (int)tdb->device, (int)tdb->inode)); errno = EBUSY; goto fail; } /* * We had tdb_mmap(tdb) here before, * but we need to use tdb_fstat(), * which is triggered from tdb_oob() before calling tdb_mmap(). * As this skips tdb->hdr_ofs. 
*/ tdb->map_size = 0; ret = tdb_oob(tdb, 0, 1, 0); if (ret == -1) { errno = EIO; goto fail; } if (tdb->feature_flags & TDB_FEATURE_FLAG_MUTEX) { if (!(tdb->flags & TDB_NOLOCK)) { ret = tdb_mutex_mmap(tdb); if (ret != 0) { goto fail; } } } if (tdb->hash_size > UINT32_MAX/4) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: " "hash size %"PRIu32" too large\n", tdb->hash_size)); errno = EINVAL; goto fail; } ret = tdb_oob(tdb, FREELIST_TOP, 4*tdb->hash_size, 1); if (ret == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: " "hash size %"PRIu32" does not fit\n", tdb->hash_size)); errno = EINVAL; goto fail; } if (locked) { if (tdb_nest_unlock(tdb, ACTIVE_LOCK, F_WRLCK, false) == -1) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: " "failed to release ACTIVE_LOCK on %s: %s\n", name, strerror(errno))); goto fail; } } if (locked || (tdb_flags & TDB_CLEAR_IF_FIRST)) { /* * We always need to do this if the CLEAR_IF_FIRST * flag is set, even if we didn't get the initial * exclusive lock as we need to let all other users * know we're using it. */ ret = tdb_nest_lock(tdb, ACTIVE_LOCK, F_RDLCK, TDB_LOCK_WAIT); if (ret == -1) { goto fail; } } /* if needed, run recovery */ if (tdb_transaction_recover(tdb) == -1) { goto fail; } #ifdef TDB_TRACE { char tracefile[strlen(name) + 32]; snprintf(tracefile, sizeof(tracefile), "%s.trace.%li", name, (long)getpid()); tdb->tracefd = open(tracefile, O_WRONLY|O_CREAT|O_EXCL, 0600); if (tdb->tracefd >= 0) { tdb_enable_seqnum(tdb); tdb_trace_open(tdb, "tdb_open", hash_size, tdb_flags, open_flags); } else TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: failed to open trace file %s!\n", tracefile)); } #endif internal: /* Internal (memory-only) databases skip all the code above to * do with disk files, and resume here by releasing their * open lock and hooking into the active list. */ if (tdb_nest_unlock(tdb, OPEN_LOCK, F_WRLCK, false) == -1) { goto fail; } tdb->next = tdbs; tdbs = tdb; errno = orig_errno; return tdb; fail: { int save_errno = errno; if (!tdb) return NULL; #ifdef TDB_TRACE close(tdb->tracefd); #endif if (tdb->map_ptr) { if (tdb->flags & TDB_INTERNAL) SAFE_FREE(tdb->map_ptr); else tdb_munmap(tdb); } if (tdb->fd != -1) if (close(tdb->fd) != 0) TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: failed to close tdb->fd on error!\n")); SAFE_FREE(tdb->lockrecs); SAFE_FREE(tdb->name); SAFE_FREE(tdb); errno = save_errno; return NULL; } } /* * Set the maximum number of dead records per hash chain */ _PUBLIC_ void tdb_set_max_dead(struct tdb_context *tdb, int max_dead) { tdb->max_dead_records = max_dead; } /** * Close a database. * * @returns -1 for error; 0 for success. 
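 *
 * Any open transaction is cancelled, and the context's resources
 * (mmap, mutex area, file descriptor, lock records) are released;
 * the tdb pointer must not be used afterwards.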
 **/
_PUBLIC_ int tdb_close(struct tdb_context *tdb)
{
	struct tdb_context **i;
	int ret = 0;

	if (tdb->transaction) {
		tdb_transaction_cancel(tdb);
	}

	tdb_trace(tdb, "tdb_close");
	if (tdb->map_ptr) {
		if (tdb->flags & TDB_INTERNAL)
			SAFE_FREE(tdb->map_ptr);
		else
			tdb_munmap(tdb);
	}

	tdb_mutex_munmap(tdb);

	SAFE_FREE(tdb->name);
	if (tdb->fd != -1) {
		ret = close(tdb->fd);
		tdb->fd = -1;
	}
	SAFE_FREE(tdb->lockrecs);

	/* Remove from contexts list */
	for (i = &tdbs; *i; i = &(*i)->next) {
		if (*i == tdb) {
			*i = tdb->next;
			break;
		}
	}

#ifdef TDB_TRACE
	close(tdb->tracefd);
#endif
	memset(tdb, 0, sizeof(*tdb));
	SAFE_FREE(tdb);

	return ret;
}

/* register a logging function */
_PUBLIC_ void tdb_set_logging_function(struct tdb_context *tdb,
				       const struct tdb_logging_context *log_ctx)
{
	tdb->log = *log_ctx;
}

_PUBLIC_ void *tdb_get_logging_private(struct tdb_context *tdb)
{
	return tdb->log.log_private;
}

static int tdb_reopen_internal(struct tdb_context *tdb, bool active_lock)
{
#if !defined(LIBREPLACE_PREAD_NOT_REPLACED) || \
	!defined(LIBREPLACE_PWRITE_NOT_REPLACED)
	struct stat st;
#endif

	if (tdb->flags & TDB_INTERNAL) {
		return 0; /* Nothing to do. */
	}

	if (tdb_have_extra_locks(tdb)) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_reopen: reopen not allowed with locks held\n"));
		goto fail;
	}

	if (tdb->transaction != 0) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_reopen: reopen not allowed inside a transaction\n"));
		goto fail;
	}

/* If we have real pread & pwrite, we can skip reopen. */
#if !defined(LIBREPLACE_PREAD_NOT_REPLACED) || \
	!defined(LIBREPLACE_PWRITE_NOT_REPLACED)
	if (tdb_munmap(tdb) != 0) {
		TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_reopen: munmap failed (%s)\n", strerror(errno)));
		goto fail;
	}
	if (close(tdb->fd) != 0)
		TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_reopen: WARNING closing tdb->fd failed!\n"));
	tdb->fd = open(tdb->name, tdb->open_flags & ~(O_CREAT|O_TRUNC), 0);
	if (tdb->fd == -1) {
		TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_reopen: open failed (%s)\n", strerror(errno)));
		goto fail;
	}
	/*
	 * We only use st.st_dev and st.st_ino from the raw fstat()
	 * call, everything else needs to use tdb_fstat() in order
	 * to skip tdb->hdr_ofs!
	 */
	if (fstat(tdb->fd, &st) != 0) {
		TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_reopen: fstat failed (%s)\n", strerror(errno)));
		goto fail;
	}
	if (st.st_ino != tdb->inode || st.st_dev != tdb->device) {
		TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_reopen: file dev/inode has changed!\n"));
		goto fail;
	}
	ZERO_STRUCT(st);

	/*
	 * We had tdb_mmap(tdb) here before,
	 * but we need to use tdb_fstat(),
	 * which is triggered from tdb_oob() before calling tdb_mmap().
	 * As this skips tdb->hdr_ofs.
	 */
	tdb->map_size = 0;
	if (tdb_oob(tdb, 0, 1, 0) != 0) {
		goto fail;
	}
#endif /* fake pread or pwrite */

	/* We may still think we hold the active lock.
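	 * The kernel dropped our fcntl locks when the old file
	 * descriptor was closed (and locks are not inherited across
	 * fork anyway), so the in-memory lock records are stale and
	 * must be discarded.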
	 */
	tdb->num_lockrecs = 0;
	SAFE_FREE(tdb->lockrecs);
	tdb->lockrecs_array_length = 0;

	if (active_lock &&
	    tdb_nest_lock(tdb, ACTIVE_LOCK, F_RDLCK, TDB_LOCK_WAIT) == -1) {
		TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_reopen: failed to obtain active lock\n"));
		goto fail;
	}
	return 0;

fail:
	tdb_close(tdb);
	return -1;
}

/* reopen a tdb - this can be used after a fork to ensure that we have an independent
   seek pointer from our parent and to re-establish locks */
_PUBLIC_ int tdb_reopen(struct tdb_context *tdb)
{
	bool active_lock;
	active_lock = (tdb->flags & (TDB_CLEAR_IF_FIRST|TDB_MUTEX_LOCKING));

	return tdb_reopen_internal(tdb, active_lock);
}

/* reopen all tdb's */
_PUBLIC_ int tdb_reopen_all(int parent_longlived)
{
	struct tdb_context *tdb;

	for (tdb=tdbs; tdb; tdb = tdb->next) {
		bool active_lock;
		active_lock = (tdb->flags & (TDB_CLEAR_IF_FIRST|TDB_MUTEX_LOCKING));

		/*
		 * If the parent is longlived (ie. a
		 * parent daemon architecture), we know
		 * it will keep its active lock on a
		 * tdb opened with CLEAR_IF_FIRST. Thus
		 * for child processes we don't have to
		 * add an active lock. This is essential
		 * to improve performance on systems that
		 * keep POSIX locks as a non-scalable data
		 * structure in the kernel.
		 */
		if (parent_longlived) {
			/* Ensure no clear-if-first. */
			active_lock = false;
		}

		if (tdb_reopen_internal(tdb, active_lock) != 0)
			return -1;
	}

	return 0;
}
tdb-1.4.2/common/rescue.c0000660000000000000000000002012113527011454015153 0ustar rootroot00000000000000
/*
   Unix SMB/CIFS implementation.

   trivial database library, rescue attempt code.

   Copyright (C) Rusty Russell 2012

     ** NOTE! The following LGPL license applies to the tdb
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "tdb_private.h"
#include <assert.h>

struct found {
	tdb_off_t head; /* 0 -> invalid. */
	struct tdb_record rec;
	TDB_DATA key;
	bool in_hash;
	bool in_free;
};

struct found_table {
	/* As an ordered array (by head offset). */
	struct found *arr;
	unsigned int num, max;
};

static bool looks_like_valid_record(struct tdb_context *tdb,
				    tdb_off_t off,
				    const struct tdb_record *rec,
				    TDB_DATA *key)
{
	unsigned int hval;

	if (rec->magic != TDB_MAGIC)
		return false;
	if (rec->key_len + rec->data_len > rec->rec_len)
		return false;
	if (rec->rec_len % TDB_ALIGNMENT)
		return false;
	/* Next pointer must make some sense. */
	if (rec->next > 0 && rec->next < TDB_DATA_START(tdb->hash_size))
		return false;
	if (tdb_oob(tdb, rec->next, sizeof(*rec), 1))
		return false;

	key->dsize = rec->key_len;
	key->dptr = tdb_alloc_read(tdb, off + sizeof(*rec), key->dsize);
	if (!key->dptr)
		return false;

	hval = tdb->hash_fn(key);
	if (hval != rec->full_hash) {
		free(key->dptr);
		return false;
	}

	/* Caller frees up key->dptr */
	return true;
}

static bool add_to_table(struct found_table *found,
			 tdb_off_t off,
			 struct tdb_record *rec,
			 TDB_DATA key)
{
	if (found->num + 1 > found->max) {
		struct found *new;
		found->max = (found->max ?
			      found->max * 2 : 128);
		new = realloc(found->arr, found->max * sizeof(found->arr[0]));
		if (!new)
			return false;
		found->arr = new;
	}

	found->arr[found->num].head = off;
	found->arr[found->num].rec = *rec;
	found->arr[found->num].key = key;
	found->arr[found->num].in_hash = false;
	found->arr[found->num].in_free = false;

	found->num++;
	return true;
}

static bool walk_record(struct tdb_context *tdb,
			const struct found *f,
			void (*walk)(TDB_DATA, TDB_DATA, void *private_data),
			void *private_data)
{
	TDB_DATA data;

	data.dsize = f->rec.data_len;
	data.dptr = tdb_alloc_read(tdb,
				   f->head + sizeof(f->rec) + f->rec.key_len,
				   data.dsize);
	if (!data.dptr) {
		if (tdb->ecode == TDB_ERR_OOM)
			return false;
		/* I/O errors are expected. */
		return true;
	}

	walk(f->key, data, private_data);
	free(data.dptr);
	return true;
}

/* First entry which has offset >= this one. */
static unsigned int find_entry(struct found_table *found, tdb_off_t off)
{
	unsigned int start = 0, end = found->num;

	while (start < end) {
		/* We can't overflow here. */
		unsigned int mid = (start + end) / 2;

		if (off < found->arr[mid].head) {
			end = mid;
		} else if (off > found->arr[mid].head) {
			start = mid + 1;
		} else {
			return mid;
		}
	}

	assert(start == end);
	return end;
}

static void found_in_hashchain(struct found_table *found, tdb_off_t head)
{
	unsigned int match;

	match = find_entry(found, head);
	if (match < found->num && found->arr[match].head == head) {
		found->arr[match].in_hash = true;
	}
}

static void mark_free_area(struct found_table *found, tdb_off_t head,
			   tdb_len_t len)
{
	unsigned int match;

	match = find_entry(found, head);
	/* Mark everything within this free entry. */
	while (match < found->num) {
		if (found->arr[match].head >= head + len) {
			break;
		}
		found->arr[match].in_free = true;
		match++;
	}
}

static int cmp_key(const void *a, const void *b)
{
	const struct found *fa = a, *fb = b;

	if (fa->key.dsize < fb->key.dsize) {
		return -1;
	} else if (fa->key.dsize > fb->key.dsize) {
		return 1;
	}
	return memcmp(fa->key.dptr, fb->key.dptr, fa->key.dsize);
}

static bool key_eq(TDB_DATA a, TDB_DATA b)
{
	return a.dsize == b.dsize
		&& memcmp(a.dptr, b.dptr, a.dsize) == 0;
}

static void free_table(struct found_table *found)
{
	unsigned int i;

	for (i = 0; i < found->num; i++) {
		free(found->arr[i].key.dptr);
	}
	free(found->arr);
}

static void logging_suppressed(struct tdb_context *tdb,
			       enum tdb_debug_level level,
			       const char *fmt, ...)
{
}

_PUBLIC_ int tdb_rescue(struct tdb_context *tdb,
			void (*walk)(TDB_DATA, TDB_DATA, void *private_data),
			void *private_data)
{
	struct found_table found = { NULL, 0, 0 };
	tdb_off_t h, off, i;
	tdb_log_func oldlog = tdb->log.log_fn;
	struct tdb_record rec;
	TDB_DATA key;
	bool locked;

	/* Read-only databases use no locking at all: it's best-effort.
	 * We may have a write lock already, so skip that case too. */
	if (tdb->read_only || tdb->allrecord_lock.count != 0) {
		locked = false;
	} else {
		if (tdb_lockall_read(tdb) == -1)
			return -1;
		locked = true;
	}

	/* Make sure we know the true size of the underlying file. */
	tdb_oob(tdb, tdb->map_size, 1, 1);

	/* Suppress logging, since we anticipate errors. */
	tdb->log.log_fn = logging_suppressed;

	/* Now walk entire db looking for records. */
	for (off = TDB_DATA_START(tdb->hash_size);
	     off < tdb->map_size;
	     off += TDB_ALIGNMENT) {
		if (tdb->methods->tdb_read(tdb, off, &rec, sizeof(rec),
					   DOCONV()) == -1)
			continue;

		if (looks_like_valid_record(tdb, off, &rec, &key)) {
			if (!add_to_table(&found, off, &rec, key)) {
				goto oom;
			}
		}
	}

	/* Walk hash chains to positively vet.
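	 * A record found by the raw scan above only counts as live if a
	 * hash chain actually points at it (in_hash). Chain 0 is the
	 * freelist: its entries instead mark candidates as in_free so
	 * they are not mistaken for current data.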
	 */
	for (h = 0; h < 1+tdb->hash_size; h++) {
		bool slow_chase = false;
		tdb_off_t slow_off = FREELIST_TOP + h*sizeof(tdb_off_t);

		if (tdb_ofs_read(tdb, FREELIST_TOP + h*sizeof(tdb_off_t),
				 &off) == -1)
			continue;

		while (off && off != slow_off) {
			if (tdb->methods->tdb_read(tdb, off, &rec,
						   sizeof(rec),
						   DOCONV()) != 0) {
				break;
			}

			/* 0 is the free list, rest are hash chains. */
			if (h == 0) {
				/* Don't mark garbage as free. */
				if (rec.magic != TDB_FREE_MAGIC) {
					break;
				}
				mark_free_area(&found, off,
					       sizeof(rec) + rec.rec_len);
			} else {
				found_in_hashchain(&found, off);
			}

			off = rec.next;

			/* Loop detection using second pointer at half-speed */
			if (slow_chase) {
				/* First entry happens to be next ptr */
				tdb_ofs_read(tdb, slow_off, &slow_off);
			}
			slow_chase = !slow_chase;
		}
	}

	/* Recovery area: must be marked as free, since it often has old
	 * records in there! */
	if (tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, &off) == 0 && off != 0) {
		if (tdb->methods->tdb_read(tdb, off, &rec, sizeof(rec),
					   DOCONV()) == 0) {
			mark_free_area(&found, off, sizeof(rec) + rec.rec_len);
		}
	}

	/* Now sort by key! */
	if (found.arr != NULL) {
		qsort(found.arr, found.num, sizeof(found.arr[0]), cmp_key);
	}

	for (i = 0; (found.arr != NULL) && i < found.num; ) {
		unsigned int num, num_in_hash = 0;

		/* How many are identical? */
		for (num = 0; num < found.num - i; num++) {
			if (!key_eq(found.arr[i].key, found.arr[i+num].key)) {
				break;
			}
			if (found.arr[i+num].in_hash) {
				if (!walk_record(tdb, &found.arr[i+num],
						 walk, private_data))
					goto oom;
				num_in_hash++;
			}
		}
		assert(num);

		/* If none were in the hash, we print any not in free list. */
		if (num_in_hash == 0) {
			unsigned int j;

			for (j = i; j < i + num; j++) {
				if (!found.arr[j].in_free) {
					if (!walk_record(tdb, &found.arr[j],
							 walk, private_data))
						goto oom;
				}
			}
		}
		i += num;
	}

	tdb->log.log_fn = oldlog;
	if (locked) {
		tdb_unlockall_read(tdb);
	}
	return 0;

oom:
	tdb->log.log_fn = oldlog;
	tdb->ecode = TDB_ERR_OOM;
	TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_rescue: failed allocating\n"));
	free_table(&found);
	if (locked) {
		tdb_unlockall_read(tdb);
	}
	return -1;
}
tdb-1.4.2/common/summary.c0000660000000000000000000001362213444661620015366 0ustar rootroot00000000000000
/*
   Trivial Database: human-readable summary code
   Copyright (C) Rusty Russell 2010

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ #include "tdb_private.h" #define SUMMARY_FORMAT \ "Size of file/data: %llu/%zu\n" \ "Header offset/logical size: %zu/%zu\n" \ "Number of records: %zu\n" \ "Incompatible hash: %s\n" \ "Active/supported feature flags: 0x%08x/0x%08x\n" \ "Robust mutexes locking: %s\n" \ "Smallest/average/largest keys: %zu/%zu/%zu\n" \ "Smallest/average/largest data: %zu/%zu/%zu\n" \ "Smallest/average/largest padding: %zu/%zu/%zu\n" \ "Number of dead records: %zu\n" \ "Smallest/average/largest dead records: %zu/%zu/%zu\n" \ "Number of free records: %zu\n" \ "Smallest/average/largest free records: %zu/%zu/%zu\n" \ "Number of hash chains: %zu\n" \ "Smallest/average/largest hash chains: %zu/%zu/%zu\n" \ "Number of uncoalesced records: %zu\n" \ "Smallest/average/largest uncoalesced runs: %zu/%zu/%zu\n" \ "Percentage keys/data/padding/free/dead/rechdrs&tailers/hashes: %.0f/%.0f/%.0f/%.0f/%.0f/%.0f/%.0f\n" /* We don't use tally module, to keep upstream happy. */ struct tally { size_t min, max, total; size_t num; }; static void tally_init(struct tally *tally) { tally->total = 0; tally->num = 0; tally->min = tally->max = 0; } static void tally_add(struct tally *tally, size_t len) { if (tally->num == 0) tally->max = tally->min = len; else if (len > tally->max) tally->max = len; else if (len < tally->min) tally->min = len; tally->num++; tally->total += len; } static size_t tally_mean(const struct tally *tally) { if (!tally->num) return 0; return tally->total / tally->num; } static size_t get_hash_length(struct tdb_context *tdb, unsigned int i) { tdb_off_t rec_ptr; struct tdb_chainwalk_ctx chainwalk; size_t count = 0; if (tdb_ofs_read(tdb, TDB_HASH_TOP(i), &rec_ptr) == -1) return 0; tdb_chainwalk_init(&chainwalk, rec_ptr); /* keep looking until we find the right record */ while (rec_ptr) { struct tdb_record r; bool ok; ++count; if (tdb_rec_read(tdb, rec_ptr, &r) == -1) return 0; rec_ptr = r.next; ok = tdb_chainwalk_check(tdb, &chainwalk, rec_ptr); if (!ok) { return SIZE_MAX; } } return count; } _PUBLIC_ char *tdb_summary(struct tdb_context *tdb) { off_t file_size; tdb_off_t off, rec_off; struct tally freet, keys, data, dead, extra, hashval, uncoal; struct tdb_record rec; char *ret = NULL; bool locked; size_t unc = 0; int len; struct tdb_record recovery; /* Read-only databases use no locking at all: it's best-effort. * We may have a write lock already, so skip that case too. */ if (tdb->read_only || tdb->allrecord_lock.count != 0) { locked = false; } else { if (tdb_lockall_read(tdb) == -1) return NULL; locked = true; } if (tdb_recovery_area(tdb, tdb->methods, &rec_off, &recovery) != 0) { goto unlock; } tally_init(&freet); tally_init(&keys); tally_init(&data); tally_init(&dead); tally_init(&extra); tally_init(&hashval); tally_init(&uncoal); for (off = TDB_DATA_START(tdb->hash_size); off < tdb->map_size - 1; off += sizeof(rec) + rec.rec_len) { if (tdb->methods->tdb_read(tdb, off, &rec, sizeof(rec), DOCONV()) == -1) goto unlock; switch (rec.magic) { case TDB_MAGIC: tally_add(&keys, rec.key_len); tally_add(&data, rec.data_len); tally_add(&extra, rec.rec_len - (rec.key_len + rec.data_len)); if (unc > 1) tally_add(&uncoal, unc - 1); unc = 0; break; case TDB_FREE_MAGIC: tally_add(&freet, rec.rec_len); unc++; break; /* If we crash after ftruncate, we can get zeroes or fill. */ case TDB_RECOVERY_INVALID_MAGIC: case 0x42424242: unc++; /* If it's a valid recovery, we can trust rec_len. 
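			 * (off == rec_off means this record is the recovery
			 * area itself; anything else gets its length
			 * recomputed from the dead space around it.)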
			 */
			if (off != rec_off) {
				rec.rec_len = tdb_dead_space(tdb, off)
					- sizeof(rec);
			}
			FALL_THROUGH;
		case TDB_DEAD_MAGIC:
			tally_add(&dead, rec.rec_len);
			break;
		default:
			TDB_LOG((tdb, TDB_DEBUG_ERROR,
				 "Unexpected record magic 0x%x at offset %u\n",
				 rec.magic, off));
			goto unlock;
		}
	}
	if (unc > 1)
		tally_add(&uncoal, unc - 1);

	for (off = 0; off < tdb->hash_size; off++)
		tally_add(&hashval, get_hash_length(tdb, off));

	file_size = tdb->hdr_ofs + tdb->map_size;

	len = asprintf(&ret, SUMMARY_FORMAT,
		       (unsigned long long)file_size, keys.total+data.total,
		       (size_t)tdb->hdr_ofs, (size_t)tdb->map_size,
		       keys.num,
		       (tdb->hash_fn == tdb_jenkins_hash)?"yes":"no",
		       (unsigned)tdb->feature_flags, TDB_SUPPORTED_FEATURE_FLAGS,
		       (tdb->feature_flags & TDB_FEATURE_FLAG_MUTEX)?"yes":"no",
		       keys.min, tally_mean(&keys), keys.max,
		       data.min, tally_mean(&data), data.max,
		       extra.min, tally_mean(&extra), extra.max,
		       dead.num,
		       dead.min, tally_mean(&dead), dead.max,
		       freet.num,
		       freet.min, tally_mean(&freet), freet.max,
		       hashval.num,
		       hashval.min, tally_mean(&hashval), hashval.max,
		       uncoal.total,
		       uncoal.min, tally_mean(&uncoal), uncoal.max,
		       keys.total * 100.0 / file_size,
		       data.total * 100.0 / file_size,
		       extra.total * 100.0 / file_size,
		       freet.total * 100.0 / file_size,
		       dead.total * 100.0 / file_size,
		       (keys.num + freet.num + dead.num)
		       * (sizeof(struct tdb_record) + sizeof(uint32_t))
		       * 100.0 / file_size,
		       tdb->hash_size * sizeof(tdb_off_t)
		       * 100.0 / file_size);
	if (len == -1) {
		goto unlock;
	}

unlock:
	if (locked) {
		tdb_unlockall_read(tdb);
	}
	return ret;
}
tdb-1.4.2/common/tdb.c0000660000000000000000000007562213527011454014446 0ustar rootroot00000000000000
/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Andrew Tridgell 1999-2005
   Copyright (C) Paul `Rusty' Russell 2000
   Copyright (C) Jeremy Allison 2000-2003

     ** NOTE! The following LGPL license applies to the tdb
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#include "tdb_private.h"

_PUBLIC_ TDB_DATA tdb_null;

/*
  non-blocking increment of the tdb sequence number if the tdb has been opened using
  the TDB_SEQNUM flag
*/
_PUBLIC_ void tdb_increment_seqnum_nonblock(struct tdb_context *tdb)
{
	tdb_off_t seqnum=0;

	if (!(tdb->flags & TDB_SEQNUM)) {
		return;
	}

	/* we ignore errors from this, as we have no sane way of
	   dealing with them.
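	   The unlocked read-modify-write below is also why this variant
	   is "nonblock": callers that need an atomic bump go through
	   tdb_increment_seqnum(), which takes the TDB_SEQNUM_OFS lock
	   first.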
	*/
	tdb_ofs_read(tdb, TDB_SEQNUM_OFS, &seqnum);
	seqnum++;
	tdb_ofs_write(tdb, TDB_SEQNUM_OFS, &seqnum);
}

/*
  increment the tdb sequence number if the tdb has been opened using
  the TDB_SEQNUM flag
*/
static void tdb_increment_seqnum(struct tdb_context *tdb)
{
	if (!(tdb->flags & TDB_SEQNUM)) {
		return;
	}

	if (tdb->transaction != NULL) {
		tdb_increment_seqnum_nonblock(tdb);
		return;
	}

	if (tdb_nest_lock(tdb, TDB_SEQNUM_OFS, F_WRLCK,
			  TDB_LOCK_WAIT|TDB_LOCK_PROBE) != 0) {
		return;
	}

	tdb_increment_seqnum_nonblock(tdb);

	tdb_nest_unlock(tdb, TDB_SEQNUM_OFS, F_WRLCK, false);
}

static int tdb_key_compare(TDB_DATA key, TDB_DATA data, void *private_data)
{
	return memcmp(data.dptr, key.dptr, data.dsize);
}

void tdb_chainwalk_init(struct tdb_chainwalk_ctx *ctx, tdb_off_t ptr)
{
	*ctx = (struct tdb_chainwalk_ctx) { .slow_ptr = ptr };
}

bool tdb_chainwalk_check(struct tdb_context *tdb,
			 struct tdb_chainwalk_ctx *ctx,
			 tdb_off_t next_ptr)
{
	int ret;

	if (ctx->slow_chase) {
		ret = tdb_ofs_read(tdb, ctx->slow_ptr, &ctx->slow_ptr);
		if (ret == -1) {
			return false;
		}
	}
	ctx->slow_chase = !ctx->slow_chase;

	if (next_ptr == ctx->slow_ptr) {
		tdb->ecode = TDB_ERR_CORRUPT;
		TDB_LOG((tdb, TDB_DEBUG_ERROR,
			 "tdb_chainwalk_check: circular chain\n"));
		return false;
	}

	return true;
}

/* Returns 0 on fail. On success, return offset of record, and fills
   in rec */
static tdb_off_t tdb_find(struct tdb_context *tdb, TDB_DATA key, uint32_t hash,
			  struct tdb_record *r)
{
	tdb_off_t rec_ptr;
	struct tdb_chainwalk_ctx chainwalk;

	/* read in the hash top */
	if (tdb_ofs_read(tdb, TDB_HASH_TOP(hash), &rec_ptr) == -1)
		return 0;

	tdb_chainwalk_init(&chainwalk, rec_ptr);

	/* keep looking until we find the right record */
	while (rec_ptr) {
		bool ok;

		if (tdb_rec_read(tdb, rec_ptr, r) == -1)
			return 0;

		if (!TDB_DEAD(r) && hash==r->full_hash
		    && key.dsize==r->key_len
		    && tdb_parse_data(tdb, key, rec_ptr + sizeof(*r),
				      r->key_len, tdb_key_compare,
				      NULL) == 0) {
			return rec_ptr;
		}
		rec_ptr = r->next;

		ok = tdb_chainwalk_check(tdb, &chainwalk, rec_ptr);
		if (!ok) {
			return 0;
		}
	}
	tdb->ecode = TDB_ERR_NOEXIST;
	return 0;
}

/* As tdb_find, but if you succeed, keep the lock */
tdb_off_t tdb_find_lock_hash(struct tdb_context *tdb, TDB_DATA key,
			     uint32_t hash, int locktype,
			     struct tdb_record *rec)
{
	uint32_t rec_ptr;

	if (tdb_lock(tdb, BUCKET(hash), locktype) == -1)
		return 0;
	if (!(rec_ptr = tdb_find(tdb, key, hash, rec)))
		tdb_unlock(tdb, BUCKET(hash), locktype);
	return rec_ptr;
}

static TDB_DATA _tdb_fetch(struct tdb_context *tdb, TDB_DATA key);

struct tdb_update_hash_state {
	const TDB_DATA *dbufs;
	int num_dbufs;
	tdb_len_t dbufs_len;
};

static int tdb_update_hash_cmp(TDB_DATA key, TDB_DATA data, void *private_data)
{
	struct tdb_update_hash_state *state = private_data;
	unsigned char *dptr = data.dptr;
	int i;

	if (state->dbufs_len != data.dsize) {
		return -1;
	}

	for (i=0; i<state->num_dbufs; i++) {
		TDB_DATA dbuf = state->dbufs[i];
		if (dbuf.dsize > 0) {
			int ret;
			ret = memcmp(dptr, dbuf.dptr, dbuf.dsize);
			if (ret != 0) {
				return -1;
			}
			dptr += dbuf.dsize;
		}
	}

	return 0;
}

/* update an entry in place - this only works if the new data size
   is <= the old data size and the key exists.
   on failure return -1.
*/
static int tdb_update_hash(struct tdb_context *tdb, TDB_DATA key,
			   uint32_t hash,
			   const TDB_DATA *dbufs, int num_dbufs,
			   tdb_len_t dbufs_len)
{
	struct tdb_record rec;
	tdb_off_t rec_ptr, ofs;
	int i;

	/* find entry */
	if (!(rec_ptr = tdb_find(tdb, key, hash, &rec)))
		return -1;

	/* it could be an exact duplicate of what is there - this is
	 * surprisingly common (eg. with a ldb re-index).
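	 * In that case tdb_parse_record() compares the new buffers
	 * against the stored data under the chain read lock, and we can
	 * skip the write entirely.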
	 */
	if (rec.data_len == dbufs_len) {
		struct tdb_update_hash_state state = {
			.dbufs = dbufs, .num_dbufs = num_dbufs,
			.dbufs_len = dbufs_len
		};
		int ret;

		ret = tdb_parse_record(tdb, key, tdb_update_hash_cmp, &state);
		if (ret == 0) {
			return 0;
		}
	}

	/* must be long enough key, data and tailer */
	if (rec.rec_len < key.dsize + dbufs_len + sizeof(tdb_off_t)) {
		tdb->ecode = TDB_SUCCESS; /* Not really an error */
		return -1;
	}

	ofs = rec_ptr + sizeof(rec) + rec.key_len;

	for (i=0; i<num_dbufs; i++) {
		TDB_DATA dbuf = dbufs[i];
		int ret;

		ret = tdb->methods->tdb_write(tdb, ofs, dbuf.dptr, dbuf.dsize);
		if (ret == -1) {
			return -1;
		}
		ofs += dbuf.dsize;
	}

	if (dbufs_len != rec.data_len) {
		/* update size */
		rec.data_len = dbufs_len;
		return tdb_rec_write(tdb, rec_ptr, &rec);
	}

	return 0;
}

/* find an entry in the database given a key */
/* If an entry doesn't exist tdb_err will be set to
 * TDB_ERR_NOEXIST. If a key has no data attached
 * then the TDB_DATA will have zero length but
 * a non-zero pointer
 */
static TDB_DATA _tdb_fetch(struct tdb_context *tdb, TDB_DATA key)
{
	tdb_off_t rec_ptr;
	struct tdb_record rec;
	TDB_DATA ret;
	uint32_t hash;

	/* find which hash bucket it is in */
	hash = tdb->hash_fn(&key);
	if (!(rec_ptr = tdb_find_lock_hash(tdb,key,hash,F_RDLCK,&rec)))
		return tdb_null;

	ret.dptr = tdb_alloc_read(tdb, rec_ptr + sizeof(rec) + rec.key_len,
				  rec.data_len);
	ret.dsize = rec.data_len;
	tdb_unlock(tdb, BUCKET(rec.full_hash), F_RDLCK);
	return ret;
}

_PUBLIC_ TDB_DATA tdb_fetch(struct tdb_context *tdb, TDB_DATA key)
{
	TDB_DATA ret = _tdb_fetch(tdb, key);

	tdb_trace_1rec_retrec(tdb, "tdb_fetch", key, ret);
	return ret;
}

/*
 * Find an entry in the database and hand the record's data to a parsing
 * function. The parsing function is executed under the chain read lock, so it
 * should be fast and should not block on other syscalls.
 *
 * DON'T CALL OTHER TDB CALLS FROM THE PARSER, THIS MIGHT LEAD TO SEGFAULTS.
 *
 * For mmapped tdb's that do not have a transaction open it points the parsing
 * function directly at the mmap area, it avoids the malloc/memcpy in this
 * case. If a transaction is open or no mmap is available, it has to do
 * malloc/read/parse/free.
 *
 * This is interesting for all readers of potentially large data structures in
 * the tdb records, ldb indexes being one example.
 *
 * Return -1 if the record was not found.
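 *
 * A minimal parser sketch (illustrative only; "fetch_len" is a made-up
 * helper, not part of the API):
 *
 *   static int fetch_len(TDB_DATA key, TDB_DATA data, void *private_data)
 *   {
 *           *(size_t *)private_data = data.dsize;
 *           return 0;
 *   }
 *
 *   size_t len;
 *   if (tdb_parse_record(tdb, key, fetch_len, &len) == 0) {
 *           ... use len ...
 *   }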
*/ _PUBLIC_ int tdb_parse_record(struct tdb_context *tdb, TDB_DATA key, int (*parser)(TDB_DATA key, TDB_DATA data, void *private_data), void *private_data) { tdb_off_t rec_ptr; struct tdb_record rec; int ret; uint32_t hash; /* find which hash bucket it is in */ hash = tdb->hash_fn(&key); if (!(rec_ptr = tdb_find_lock_hash(tdb,key,hash,F_RDLCK,&rec))) { /* record not found */ tdb_trace_1rec_ret(tdb, "tdb_parse_record", key, -1); tdb->ecode = TDB_ERR_NOEXIST; return -1; } tdb_trace_1rec_ret(tdb, "tdb_parse_record", key, 0); ret = tdb_parse_data(tdb, key, rec_ptr + sizeof(rec) + rec.key_len, rec.data_len, parser, private_data); tdb_unlock(tdb, BUCKET(rec.full_hash), F_RDLCK); return ret; } /* check if an entry in the database exists note that 1 is returned if the key is found and 0 is returned if not found this doesn't match the conventions in the rest of this module, but is compatible with gdbm */ static int tdb_exists_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash) { struct tdb_record rec; if (tdb_find_lock_hash(tdb, key, hash, F_RDLCK, &rec) == 0) return 0; tdb_unlock(tdb, BUCKET(rec.full_hash), F_RDLCK); return 1; } _PUBLIC_ int tdb_exists(struct tdb_context *tdb, TDB_DATA key) { uint32_t hash = tdb->hash_fn(&key); int ret; ret = tdb_exists_hash(tdb, key, hash); tdb_trace_1rec_ret(tdb, "tdb_exists", key, ret); return ret; } /* * Move a dead record to the freelist. The hash chain and freelist * must be locked. */ static int tdb_del_dead(struct tdb_context *tdb, uint32_t last_ptr, uint32_t rec_ptr, struct tdb_record *rec, bool *deleted) { int ret; ret = tdb_write_lock_record(tdb, rec_ptr); if (ret == -1) { /* Someone traversing here: Just leave it dead */ return 0; } ret = tdb_write_unlock_record(tdb, rec_ptr); if (ret == -1) { return -1; } ret = tdb_ofs_write(tdb, last_ptr, &rec->next); if (ret == -1) { return -1; } *deleted = true; ret = tdb_free(tdb, rec_ptr, rec); return ret; } /* * Walk the hash chain and leave tdb->max_dead_records around. Move * the rest of dead records to the freelist. */ int tdb_trim_dead(struct tdb_context *tdb, uint32_t hash) { struct tdb_chainwalk_ctx chainwalk; struct tdb_record rec; tdb_off_t last_ptr, rec_ptr; bool locked_freelist = false; int num_dead = 0; int ret; last_ptr = TDB_HASH_TOP(hash); /* * Init chainwalk with the pointer to the hash top. It might * be that the very first record in the chain is a dead one * that we have to delete. */ tdb_chainwalk_init(&chainwalk, last_ptr); ret = tdb_ofs_read(tdb, last_ptr, &rec_ptr); if (ret == -1) { return -1; } while (rec_ptr != 0) { bool deleted = false; uint32_t next; ret = tdb_rec_read(tdb, rec_ptr, &rec); if (ret == -1) { goto fail; } /* * Make a copy of rec.next: Further down we might * delete and put the record on the freelist. Make * sure that modifications in that code path can't * break the chainwalk here. */ next = rec.next; if (rec.magic == TDB_DEAD_MAGIC) { num_dead += 1; if (num_dead > tdb->max_dead_records) { if (!locked_freelist) { /* * Lock the freelist only if * it's really required. */ ret = tdb_lock(tdb, -1, F_WRLCK); if (ret == -1) { goto fail; }; locked_freelist = true; } ret = tdb_del_dead( tdb, last_ptr, rec_ptr, &rec, &deleted); if (ret == -1) { goto fail; } } } /* * Don't do the chainwalk check if "rec_ptr" was * deleted. We reduced the chain, and the chainwalk * check might catch up early. Imagine a valid chain * with just dead records: We never can bump the * "slow" pointer in chainwalk_check, as there isn't * anything left to jump to and compare. 
*/ if (!deleted) { bool ok; last_ptr = rec_ptr; ok = tdb_chainwalk_check(tdb, &chainwalk, next); if (!ok) { ret = -1; goto fail; } } rec_ptr = next; } ret = 0; fail: if (locked_freelist) { tdb_unlock(tdb, -1, F_WRLCK); } return ret; } /* delete an entry in the database given a key */ static int tdb_delete_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash) { tdb_off_t rec_ptr; struct tdb_record rec; int ret; if (tdb->read_only || tdb->traverse_read) { tdb->ecode = TDB_ERR_RDONLY; return -1; } rec_ptr = tdb_find_lock_hash(tdb, key, hash, F_WRLCK, &rec); if (rec_ptr == 0) { return -1; } /* * Mark the record dead */ rec.magic = TDB_DEAD_MAGIC; ret = tdb_rec_write(tdb, rec_ptr, &rec); if (ret == -1) { goto done; } tdb_increment_seqnum(tdb); ret = tdb_trim_dead(tdb, hash); done: if (tdb_unlock(tdb, BUCKET(hash), F_WRLCK) != 0) TDB_LOG((tdb, TDB_DEBUG_WARNING, "tdb_delete: WARNING tdb_unlock failed!\n")); return ret; } _PUBLIC_ int tdb_delete(struct tdb_context *tdb, TDB_DATA key) { uint32_t hash = tdb->hash_fn(&key); int ret; ret = tdb_delete_hash(tdb, key, hash); tdb_trace_1rec_ret(tdb, "tdb_delete", key, ret); return ret; } /* * See if we have a dead record around with enough space */ tdb_off_t tdb_find_dead(struct tdb_context *tdb, uint32_t hash, struct tdb_record *r, tdb_len_t length, tdb_off_t *p_last_ptr) { tdb_off_t rec_ptr, last_ptr; struct tdb_chainwalk_ctx chainwalk; tdb_off_t best_rec_ptr = 0; tdb_off_t best_last_ptr = 0; struct tdb_record best = { .rec_len = UINT32_MAX }; length += sizeof(tdb_off_t); /* tailer */ last_ptr = TDB_HASH_TOP(hash); /* read in the hash top */ if (tdb_ofs_read(tdb, last_ptr, &rec_ptr) == -1) return 0; tdb_chainwalk_init(&chainwalk, rec_ptr); /* keep looking until we find the right record */ while (rec_ptr) { bool ok; if (tdb_rec_read(tdb, rec_ptr, r) == -1) return 0; if (TDB_DEAD(r) && (r->rec_len >= length) && (r->rec_len < best.rec_len)) { best_rec_ptr = rec_ptr; best_last_ptr = last_ptr; best = *r; } last_ptr = rec_ptr; rec_ptr = r->next; ok = tdb_chainwalk_check(tdb, &chainwalk, rec_ptr); if (!ok) { return 0; } } if (best.rec_len == UINT32_MAX) { return 0; } *r = best; *p_last_ptr = best_last_ptr; return best_rec_ptr; } static int _tdb_storev(struct tdb_context *tdb, TDB_DATA key, const TDB_DATA *dbufs, int num_dbufs, int flag, uint32_t hash) { struct tdb_record rec; tdb_off_t rec_ptr, ofs; tdb_len_t rec_len, dbufs_len; int i; int ret = -1; dbufs_len = 0; for (i=0; i<num_dbufs; i++) { size_t dsize = dbufs[i].dsize; if ((dbufs[i].dptr == NULL) && (dsize != 0)) { tdb->ecode = TDB_ERR_EINVAL; goto fail; } dbufs_len += dsize; if (dbufs_len < dsize) { tdb->ecode = TDB_ERR_OOM; goto fail; } } rec_len = key.dsize + dbufs_len; if ((rec_len < key.dsize) || (rec_len < dbufs_len)) { tdb->ecode = TDB_ERR_OOM; goto fail; } /* check for it existing, on insert. */ if (flag == TDB_INSERT) { if (tdb_exists_hash(tdb, key, hash)) { tdb->ecode = TDB_ERR_EXISTS; goto fail; } } else { /* first try in-place update, on modify or replace. */ if (tdb_update_hash(tdb, key, hash, dbufs, num_dbufs, dbufs_len) == 0) { goto done; } if (tdb->ecode == TDB_ERR_NOEXIST && flag == TDB_MODIFY) { /* if the record doesn't exist and we are in TDB_MODIFY mode then we should fail the store */ goto fail; } } /* reset the error code potentially set by the tdb_update_hash() */ tdb->ecode = TDB_SUCCESS; /* delete any existing record - if it doesn't exist we don't care. Doing this first reduces fragmentation, and avoids coalescing with `allocated' block before it's updated.
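	   (tdb_free() also coalesces neighbouring free space; until the
	   new record's header has been written out below, a later free
	   could otherwise swallow the block we have just allocated.)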
*/ if (flag != TDB_INSERT) tdb_delete_hash(tdb, key, hash); /* we have to allocate some space */ rec_ptr = tdb_allocate(tdb, hash, rec_len, &rec); if (rec_ptr == 0) { goto fail; } /* Read hash top into next ptr */ if (tdb_ofs_read(tdb, TDB_HASH_TOP(hash), &rec.next) == -1) goto fail; rec.key_len = key.dsize; rec.data_len = dbufs_len; rec.full_hash = hash; rec.magic = TDB_MAGIC; ofs = rec_ptr; /* write out and point the top of the hash chain at it */ ret = tdb_rec_write(tdb, ofs, &rec); if (ret == -1) { goto fail; } ofs += sizeof(rec); ret = tdb->methods->tdb_write(tdb, ofs, key.dptr, key.dsize); if (ret == -1) { goto fail; } ofs += key.dsize; for (i=0; i<num_dbufs; i++) { ret = tdb->methods->tdb_write(tdb, ofs, dbufs[i].dptr, dbufs[i].dsize); if (ret == -1) { goto fail; } ofs += dbufs[i].dsize; } ret = tdb_ofs_write(tdb, TDB_HASH_TOP(hash), &rec_ptr); if (ret == -1) { /* Need to tdb_unallocate() here */ goto fail; } done: ret = 0; fail: if (ret == 0) { tdb_increment_seqnum(tdb); } return ret; } static int _tdb_store(struct tdb_context *tdb, TDB_DATA key, TDB_DATA dbuf, int flag, uint32_t hash) { return _tdb_storev(tdb, key, &dbuf, 1, flag, hash); } /* store an element in the database, replacing any existing element with the same key return 0 on success, -1 on failure */ _PUBLIC_ int tdb_store(struct tdb_context *tdb, TDB_DATA key, TDB_DATA dbuf, int flag) { uint32_t hash; int ret; if (tdb->read_only || tdb->traverse_read) { tdb->ecode = TDB_ERR_RDONLY; tdb_trace_2rec_flag_ret(tdb, "tdb_store", key, dbuf, flag, -1); return -1; } /* find which hash bucket it is in */ hash = tdb->hash_fn(&key); if (tdb_lock(tdb, BUCKET(hash), F_WRLCK) == -1) return -1; ret = _tdb_store(tdb, key, dbuf, flag, hash); tdb_trace_2rec_flag_ret(tdb, "tdb_store", key, dbuf, flag, ret); tdb_unlock(tdb, BUCKET(hash), F_WRLCK); return ret; } _PUBLIC_ int tdb_storev(struct tdb_context *tdb, TDB_DATA key, const TDB_DATA *dbufs, int num_dbufs, int flag) { uint32_t hash; int ret; if (tdb->read_only || tdb->traverse_read) { tdb->ecode = TDB_ERR_RDONLY; tdb_trace_1plusn_rec_flag_ret(tdb, "tdb_storev", key, dbufs, num_dbufs, flag, -1); return -1; } /* find which hash bucket it is in */ hash = tdb->hash_fn(&key); if (tdb_lock(tdb, BUCKET(hash), F_WRLCK) == -1) return -1; ret = _tdb_storev(tdb, key, dbufs, num_dbufs, flag, hash); tdb_trace_1plusn_rec_flag_ret(tdb, "tdb_storev", key, dbufs, num_dbufs, flag, ret); tdb_unlock(tdb, BUCKET(hash), F_WRLCK); return ret; } /* Append to an entry. Create if not exist.
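   A minimal usage sketch (hypothetical key and TDB_USAGE_EXAMPLES
   guard; strlen() is assumed to be available via the usual includes):
*/
#ifdef TDB_USAGE_EXAMPLES
static int example_append_line(struct tdb_context *tdb, const char *line)
{
	TDB_DATA key = { .dptr = (unsigned char *)"log", .dsize = 3 };
	TDB_DATA val = { .dptr = (unsigned char *)line,
			 .dsize = strlen(line) };

	/* appends to an existing value, or creates the record */
	return tdb_append(tdb, key, val);
}
#endif /* TDB_USAGE_EXAMPLES */
/* Append to an entry. Create if not exist: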
*/ _PUBLIC_ int tdb_append(struct tdb_context *tdb, TDB_DATA key, TDB_DATA new_dbuf) { uint32_t hash; TDB_DATA dbufs[2]; int ret = -1; /* find which hash bucket it is in */ hash = tdb->hash_fn(&key); if (tdb_lock(tdb, BUCKET(hash), F_WRLCK) == -1) return -1; dbufs[0] = _tdb_fetch(tdb, key); dbufs[1] = new_dbuf; ret = _tdb_storev(tdb, key, dbufs, 2, 0, hash); tdb_trace_2rec_retrec(tdb, "tdb_append", key, dbufs[0], dbufs[1]); tdb_unlock(tdb, BUCKET(hash), F_WRLCK); SAFE_FREE(dbufs[0].dptr); return ret; } /* return the name of the current tdb file useful for external logging functions */ _PUBLIC_ const char *tdb_name(struct tdb_context *tdb) { return tdb->name; } /* return the underlying file descriptor being used by tdb, or -1 useful for external routines that want to check the device/inode of the fd */ _PUBLIC_ int tdb_fd(struct tdb_context *tdb) { return tdb->fd; } /* return the current logging function useful for external tdb routines that wish to log tdb errors */ _PUBLIC_ tdb_log_func tdb_log_fn(struct tdb_context *tdb) { return tdb->log.log_fn; } /* get the tdb sequence number. Only makes sense if the writers opened with TDB_SEQNUM set. Note that this sequence number will wrap quite quickly, so it should only be used for a 'has something changed' test, not for code that relies on the count of the number of changes made. If you want a counter then use a tdb record. The aim of this sequence number is to allow for a very lightweight test of a possible tdb change. */ _PUBLIC_ int tdb_get_seqnum(struct tdb_context *tdb) { tdb_off_t seqnum=0; tdb_ofs_read(tdb, TDB_SEQNUM_OFS, &seqnum); return seqnum; } _PUBLIC_ int tdb_hash_size(struct tdb_context *tdb) { return tdb->hash_size; } _PUBLIC_ size_t tdb_map_size(struct tdb_context *tdb) { return tdb->map_size; } _PUBLIC_ int tdb_get_flags(struct tdb_context *tdb) { return tdb->flags; } _PUBLIC_ void tdb_add_flags(struct tdb_context *tdb, unsigned flags) { if ((flags & TDB_ALLOW_NESTING) && (flags & TDB_DISALLOW_NESTING)) { tdb->ecode = TDB_ERR_NESTING; TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_add_flags: " "allow_nesting and disallow_nesting are not allowed together!")); return; } if (flags & TDB_ALLOW_NESTING) { tdb->flags &= ~TDB_DISALLOW_NESTING; } if (flags & TDB_DISALLOW_NESTING) { tdb->flags &= ~TDB_ALLOW_NESTING; } tdb->flags |= flags; } _PUBLIC_ void tdb_remove_flags(struct tdb_context *tdb, unsigned flags) { if ((flags & TDB_ALLOW_NESTING) && (flags & TDB_DISALLOW_NESTING)) { tdb->ecode = TDB_ERR_NESTING; TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_remove_flags: " "allow_nesting and disallow_nesting are not allowed together!")); return; } if ((flags & TDB_NOLOCK) && (tdb->feature_flags & TDB_FEATURE_FLAG_MUTEX) && (tdb->mutexes == NULL)) { tdb->ecode = TDB_ERR_LOCK; TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_remove_flags: " "Can not remove NOLOCK flag on mutexed databases")); return; } if (flags & TDB_ALLOW_NESTING) { tdb->flags |= TDB_DISALLOW_NESTING; } if (flags & TDB_DISALLOW_NESTING) { tdb->flags |= TDB_ALLOW_NESTING; } tdb->flags &= ~flags; } /* enable sequence number handling on an open tdb */ _PUBLIC_ void tdb_enable_seqnum(struct tdb_context *tdb) { tdb->flags |= TDB_SEQNUM; } /* add a region of the file to the freelist. 
Length is the size of the region in bytes, which includes the free list header that needs to be added */ static int tdb_free_region(struct tdb_context *tdb, tdb_off_t offset, ssize_t length) { struct tdb_record rec; if (length <= sizeof(rec)) { /* the region is not worth adding */ return 0; } if (length + offset > tdb->map_size) { TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_free_region: adding region beyond end of file\n")); return -1; } memset(&rec,'\0',sizeof(rec)); rec.rec_len = length - sizeof(rec); if (tdb_free(tdb, offset, &rec) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_free_region: failed to add free record\n")); return -1; } return 0; } /* wipe the entire database, deleting all records. This can be done very fast by using an allrecord lock. The entire data portion of the file becomes a single entry in the freelist. This code carefully steps around the recovery area, leaving it alone */ _PUBLIC_ int tdb_wipe_all(struct tdb_context *tdb) { uint32_t i; tdb_off_t offset = 0; ssize_t data_len; tdb_off_t recovery_head; tdb_len_t recovery_size = 0; if (tdb_lockall(tdb) != 0) { return -1; } tdb_trace(tdb, "tdb_wipe_all"); /* see if the tdb has a recovery area, and remember its size if so. We don't want to lose this as otherwise each tdb_wipe_all() in a transaction will increase the size of the tdb by the size of the recovery area */ if (tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, &recovery_head) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_wipe_all: failed to read recovery head\n")); goto failed; } if (recovery_head != 0) { struct tdb_record rec; if (tdb->methods->tdb_read(tdb, recovery_head, &rec, sizeof(rec), DOCONV()) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_wipe_all: failed to read recovery record\n")); return -1; } recovery_size = rec.rec_len + sizeof(rec); } /* wipe the hashes */ for (i=0;i<tdb->hash_size;i++) { if (tdb_ofs_write(tdb, TDB_HASH_TOP(i), &offset) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_wipe_all: failed to write hash %d\n", i)); goto failed; } } /* wipe the freelist */ if (tdb_ofs_write(tdb, FREELIST_TOP, &offset) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_wipe_all: failed to write freelist\n")); goto failed; } /* add all the rest of the file to the freelist, possibly leaving a gap for the recovery area */ if (recovery_size == 0) { /* the simple case - the whole file can be used as a freelist */ data_len = (tdb->map_size - TDB_DATA_START(tdb->hash_size)); if (tdb_free_region(tdb, TDB_DATA_START(tdb->hash_size), data_len) != 0) { goto failed; } } else { /* we need to add two freelist entries - one on either side of the recovery area Note that we cannot shift the recovery area during this operation.
Only the transaction.c code may move the recovery area or we risk subtle data corruption */ data_len = (recovery_head - TDB_DATA_START(tdb->hash_size)); if (tdb_free_region(tdb, TDB_DATA_START(tdb->hash_size), data_len) != 0) { goto failed; } /* and the 2nd free list entry after the recovery area - if any */ data_len = tdb->map_size - (recovery_head+recovery_size); if (tdb_free_region(tdb, recovery_head+recovery_size, data_len) != 0) { goto failed; } } tdb_increment_seqnum_nonblock(tdb); if (tdb_unlockall(tdb) != 0) { TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_wipe_all: failed to unlock\n")); goto failed; } return 0; failed: tdb_unlockall(tdb); return -1; } struct traverse_state { bool error; struct tdb_context *dest_db; }; /* traverse function for repacking */ static int repack_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data, void *private_data) { struct traverse_state *state = (struct traverse_state *)private_data; if (tdb_store(state->dest_db, key, data, TDB_INSERT) != 0) { state->error = true; return -1; } return 0; } /* repack a tdb */ _PUBLIC_ int tdb_repack(struct tdb_context *tdb) { struct tdb_context *tmp_db; struct traverse_state state; tdb_trace(tdb, "tdb_repack"); if (tdb_transaction_start(tdb) != 0) { TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Failed to start transaction\n")); return -1; } tmp_db = tdb_open("tmpdb", tdb_hash_size(tdb), TDB_INTERNAL, O_RDWR|O_CREAT, 0); if (tmp_db == NULL) { TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Failed to create tmp_db\n")); tdb_transaction_cancel(tdb); return -1; } state.error = false; state.dest_db = tmp_db; if (tdb_traverse_read(tdb, repack_traverse, &state) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Failed to traverse copying out\n")); tdb_transaction_cancel(tdb); tdb_close(tmp_db); return -1; } if (state.error) { TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Error during traversal\n")); tdb_transaction_cancel(tdb); tdb_close(tmp_db); return -1; } if (tdb_wipe_all(tdb) != 0) { TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Failed to wipe database\n")); tdb_transaction_cancel(tdb); tdb_close(tmp_db); return -1; } state.error = false; state.dest_db = tdb; if (tdb_traverse_read(tmp_db, repack_traverse, &state) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Failed to traverse copying back\n")); tdb_transaction_cancel(tdb); tdb_close(tmp_db); return -1; } if (state.error) { TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Error during second traversal\n")); tdb_transaction_cancel(tdb); tdb_close(tmp_db); return -1; } tdb_close(tmp_db); if (tdb_transaction_commit(tdb) != 0) { TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Failed to commit\n")); return -1; } return 0; } /* Even on files, we can get partial writes due to signals. 
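   write(2) may return a short count when interrupted by a signal, so
   tdb_write_all() below simply loops until the whole buffer is out.
   Next to it, tdb_add_off_t() is the overflow-checked adder used for
   offset arithmetic; a sketch of its intended use (hypothetical
   helper under a hypothetical TDB_USAGE_EXAMPLES guard):
*/
#ifdef TDB_USAGE_EXAMPLES
static int example_safe_end(tdb_off_t base, tdb_off_t extra, tdb_off_t *end)
{
	/* reject sums that wrap around the 32-bit tdb_off_t space */
	if (!tdb_add_off_t(base, extra, end)) {
		return -1;
	}
	return 0;
}
#endif /* TDB_USAGE_EXAMPLES */
/* Even on files, we can get partial writes due to signals: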
*/ bool tdb_write_all(int fd, const void *buf, size_t count) { while (count) { ssize_t ret; ret = write(fd, buf, count); if (ret < 0) return false; buf = (const char *)buf + ret; count -= ret; } return true; } bool tdb_add_off_t(tdb_off_t a, tdb_off_t b, tdb_off_t *pret) { tdb_off_t ret = a + b; if ((ret < a) || (ret < b)) { return false; } *pret = ret; return true; } #ifdef TDB_TRACE static void tdb_trace_write(struct tdb_context *tdb, const char *str) { if (!tdb_write_all(tdb->tracefd, str, strlen(str))) { close(tdb->tracefd); tdb->tracefd = -1; } } static void tdb_trace_start(struct tdb_context *tdb) { tdb_off_t seqnum=0; char msg[sizeof(tdb_off_t) * 4 + 1]; tdb_ofs_read(tdb, TDB_SEQNUM_OFS, &seqnum); snprintf(msg, sizeof(msg), "%u ", seqnum); tdb_trace_write(tdb, msg); } static void tdb_trace_end(struct tdb_context *tdb) { tdb_trace_write(tdb, "\n"); } static void tdb_trace_end_ret(struct tdb_context *tdb, int ret) { char msg[sizeof(ret) * 4 + 4]; snprintf(msg, sizeof(msg), " = %i\n", ret); tdb_trace_write(tdb, msg); } static void tdb_trace_record(struct tdb_context *tdb, TDB_DATA rec) { char msg[20 + rec.dsize*2], *p; unsigned int i; /* We differentiate zero-length records from non-existent ones. */ if (rec.dptr == NULL) { tdb_trace_write(tdb, " NULL"); return; } /* snprintf here is purely cargo-cult programming. */ p = msg; p += snprintf(p, sizeof(msg), " %zu:", rec.dsize); for (i = 0; i < rec.dsize; i++) p += snprintf(p, 2, "%02x", rec.dptr[i]); tdb_trace_write(tdb, msg); } void tdb_trace(struct tdb_context *tdb, const char *op) { tdb_trace_start(tdb); tdb_trace_write(tdb, op); tdb_trace_end(tdb); } void tdb_trace_seqnum(struct tdb_context *tdb, uint32_t seqnum, const char *op) { char msg[sizeof(tdb_off_t) * 4 + 1]; snprintf(msg, sizeof(msg), "%u ", seqnum); tdb_trace_write(tdb, msg); tdb_trace_write(tdb, op); tdb_trace_end(tdb); } void tdb_trace_open(struct tdb_context *tdb, const char *op, unsigned hash_size, unsigned tdb_flags, unsigned open_flags) { char msg[128]; snprintf(msg, sizeof(msg), "%s %u 0x%x 0x%x", op, hash_size, tdb_flags, open_flags); tdb_trace_start(tdb); tdb_trace_write(tdb, msg); tdb_trace_end(tdb); } void tdb_trace_ret(struct tdb_context *tdb, const char *op, int ret) { tdb_trace_start(tdb); tdb_trace_write(tdb, op); tdb_trace_end_ret(tdb, ret); } void tdb_trace_retrec(struct tdb_context *tdb, const char *op, TDB_DATA ret) { tdb_trace_start(tdb); tdb_trace_write(tdb, op); tdb_trace_write(tdb, " ="); tdb_trace_record(tdb, ret); tdb_trace_end(tdb); } void tdb_trace_1rec(struct tdb_context *tdb, const char *op, TDB_DATA rec) { tdb_trace_start(tdb); tdb_trace_write(tdb, op); tdb_trace_record(tdb, rec); tdb_trace_end(tdb); } void tdb_trace_1rec_ret(struct tdb_context *tdb, const char *op, TDB_DATA rec, int ret) { tdb_trace_start(tdb); tdb_trace_write(tdb, op); tdb_trace_record(tdb, rec); tdb_trace_end_ret(tdb, ret); } void tdb_trace_1rec_retrec(struct tdb_context *tdb, const char *op, TDB_DATA rec, TDB_DATA ret) { tdb_trace_start(tdb); tdb_trace_write(tdb, op); tdb_trace_record(tdb, rec); tdb_trace_write(tdb, " ="); tdb_trace_record(tdb, ret); tdb_trace_end(tdb); } void tdb_trace_2rec_flag_ret(struct tdb_context *tdb, const char *op, TDB_DATA rec1, TDB_DATA rec2, unsigned flag, int ret) { char msg[1 + sizeof(ret) * 4]; snprintf(msg, sizeof(msg), " %#x", flag); tdb_trace_start(tdb); tdb_trace_write(tdb, op); tdb_trace_record(tdb, rec1); tdb_trace_record(tdb, rec2); tdb_trace_write(tdb, msg); tdb_trace_end_ret(tdb, ret); } void tdb_trace_1plusn_rec_flag_ret(struct 
tdb_context *tdb, const char *op, TDB_DATA rec, const TDB_DATA *recs, int num_recs, unsigned flag, int ret) { char msg[1 + sizeof(ret) * 4]; int i; snprintf(msg, sizeof(msg), " %#x", flag); tdb_trace_start(tdb); tdb_trace_write(tdb, op); tdb_trace_record(tdb, rec); for (i=0; i<num_recs; i++) { tdb_trace_record(tdb, recs[i]); } tdb_trace_write(tdb, msg); tdb_trace_end_ret(tdb, ret); } void tdb_trace_2rec_retrec(struct tdb_context *tdb, const char *op, TDB_DATA rec1, TDB_DATA rec2, TDB_DATA ret) { tdb_trace_start(tdb); tdb_trace_write(tdb, op); tdb_trace_record(tdb, rec1); tdb_trace_record(tdb, rec2); tdb_trace_write(tdb, " ="); tdb_trace_record(tdb, ret); tdb_trace_end(tdb); } #endif
tdb-1.4.2/common/tdb_private.h
#ifndef TDB_PRIVATE_H
#define TDB_PRIVATE_H
#include "replace.h"
#include "system/filesys.h"
#include "system/time.h"
#include "system/shmem.h"
#include "system/select.h"
#include "system/wait.h"
#include "tdb.h"
/* #define TDB_TRACE 1 */
#ifndef HAVE_GETPAGESIZE
#define getpagesize() 0x2000
#endif
typedef uint32_t tdb_len_t; typedef uint32_t tdb_off_t;
#ifndef offsetof
#define offsetof(t,f) ((unsigned int)&((t *)0)->f)
#endif
#define TDB_MAGIC_FOOD "TDB file\n" #define TDB_VERSION (0x26011967 + 6) #define TDB_MAGIC (0x26011999U) #define TDB_FREE_MAGIC (~TDB_MAGIC) #define TDB_DEAD_MAGIC (0xFEE1DEAD) #define TDB_RECOVERY_MAGIC (0xf53bc0e7U) #define TDB_RECOVERY_INVALID_MAGIC (0x0) #define TDB_HASH_RWLOCK_MAGIC (0xbad1a51U) #define TDB_FEATURE_FLAG_MAGIC (0xbad1a52U) #define TDB_ALIGNMENT 4 #define DEFAULT_HASH_SIZE 131 #define FREELIST_TOP (sizeof(struct tdb_header)) #define TDB_ALIGN(x,a) (((x) + (a)-1) & ~((a)-1)) #define TDB_BYTEREV(x) (((((x)&0xff)<<24)|((x)&0xFF00)<<8)|(((x)>>8)&0xFF00)|((x)>>24)) #define TDB_DEAD(r) ((r)->magic == TDB_DEAD_MAGIC) #define TDB_BAD_MAGIC(r) ((r)->magic != TDB_MAGIC && !TDB_DEAD(r)) #define TDB_HASH_TOP(hash) (FREELIST_TOP + (BUCKET(hash)+1)*sizeof(tdb_off_t)) #define TDB_HASHTABLE_SIZE(tdb) ((tdb->hash_size+1)*sizeof(tdb_off_t)) #define TDB_DATA_START(hash_size) (TDB_HASH_TOP(hash_size-1) + sizeof(tdb_off_t)) #define TDB_RECOVERY_HEAD offsetof(struct tdb_header, recovery_start) #define TDB_SEQNUM_OFS offsetof(struct tdb_header, sequence_number) #define TDB_PAD_BYTE 0x42 #define TDB_PAD_U32 0x42424242 #define TDB_FEATURE_FLAG_MUTEX 0x00000001 #define TDB_SUPPORTED_FEATURE_FLAGS ( \ TDB_FEATURE_FLAG_MUTEX | \ 0) /* NB assumes there is a local variable called "tdb" that is the * current context, also takes doubly-parenthesized print-style * argument. */ #define TDB_LOG(x) tdb->log.log_fn x #ifdef TDB_TRACE void tdb_trace(struct tdb_context *tdb, const char *op); void tdb_trace_seqnum(struct tdb_context *tdb, uint32_t seqnum, const char *op); void tdb_trace_open(struct tdb_context *tdb, const char *op, unsigned hash_size, unsigned tdb_flags, unsigned open_flags); void tdb_trace_ret(struct tdb_context *tdb, const char *op, int ret); void tdb_trace_retrec(struct tdb_context *tdb, const char *op, TDB_DATA ret); void tdb_trace_1rec(struct tdb_context *tdb, const char *op, TDB_DATA rec); void tdb_trace_1rec_ret(struct tdb_context *tdb, const char *op, TDB_DATA rec, int ret); void tdb_trace_1rec_retrec(struct tdb_context *tdb, const char *op, TDB_DATA rec, TDB_DATA ret); void tdb_trace_2rec_flag_ret(struct tdb_context *tdb, const char *op, TDB_DATA rec1, TDB_DATA rec2, unsigned flag, int ret); void tdb_trace_1plusn_rec_flag_ret(struct tdb_context *tdb, const char *op, TDB_DATA rec, const TDB_DATA *recs, int num_recs, unsigned flag, int ret); void tdb_trace_2rec_retrec(struct tdb_context *tdb, const char *op, TDB_DATA rec1, TDB_DATA rec2, TDB_DATA ret); #else #define tdb_trace(tdb, op) #define tdb_trace_seqnum(tdb, seqnum, op) #define tdb_trace_open(tdb, op, hash_size, tdb_flags, open_flags) #define tdb_trace_ret(tdb, op, ret) #define tdb_trace_retrec(tdb, op, ret) #define tdb_trace_1rec(tdb, op, rec) #define tdb_trace_1rec_ret(tdb, op, rec, ret) #define tdb_trace_1rec_retrec(tdb, op, rec, ret) #define tdb_trace_2rec_flag_ret(tdb, op, rec1, rec2, flag, ret) #define tdb_trace_1plusn_rec_flag_ret(tdb, op, rec, recs, num_recs, flag, ret) #define tdb_trace_2rec_retrec(tdb, op, rec1, rec2, ret) #endif /* !TDB_TRACE */ /* lock offsets */ #define OPEN_LOCK 0 #define ACTIVE_LOCK 4 #define TRANSACTION_LOCK 8 /* free memory if the pointer is valid and zero the pointer */ #ifndef SAFE_FREE #define SAFE_FREE(x) do {
if ((x) != NULL) {free(x); (x)=NULL;} } while(0) #endif /* * Note: the BUCKET macro is broken as it returns an unexpected result when * called as BUCKET(-1) for the freelist: * * -1 is sign converted to an unsigned int 4294967295 and then the modulo * tdb->hashtable_size is computed. So with a hashtable_size of 10 the result * is * * 4294967295 % hashtable_size = 5. * * where it should be -1 (C uses symmetric modulo). * * As all callers consistently lock the same wrong list, locking is still * consistent. We cannot change this without an incompatible on-disk format * change, otherwise different tdb versions would use incompatible locking. */ #define BUCKET(hash) ((hash) % tdb->hash_size) #define DOCONV() (tdb->flags & TDB_CONVERT) #define CONVERT(x) (DOCONV() ? tdb_convert(&x, sizeof(x)) : &x) /* the body of the database is made of one tdb_record for the free space plus a separate data list for each hash value */ struct tdb_record { tdb_off_t next; /* offset of the next record in the list */ tdb_len_t rec_len; /* total byte length of record */ tdb_len_t key_len; /* byte length of key */ tdb_len_t data_len; /* byte length of data */ uint32_t full_hash; /* the full 32 bit hash of the key */ uint32_t magic; /* try to catch errors */ /* the following union is implied: union { char record[rec_len]; struct { char key[key_len]; char data[data_len]; } uint32_t totalsize; (tailer) } */ }; /* this is stored at the front of every database */ struct tdb_header { char magic_food[32]; /* for /etc/magic */ uint32_t version; /* version of the code */ uint32_t hash_size; /* number of hash entries */ tdb_off_t rwlocks; /* obsolete - kept to detect old formats */ tdb_off_t recovery_start; /* offset of transaction recovery region */ tdb_off_t sequence_number; /* used when TDB_SEQNUM is set */ uint32_t magic1_hash; /* hash of TDB_MAGIC_FOOD. */ uint32_t magic2_hash; /* hash of TDB_MAGIC. */ uint32_t feature_flags; tdb_len_t mutex_size; /* set if TDB_FEATURE_FLAG_MUTEX is set */ tdb_off_t reserved[25]; }; struct tdb_lock_type { uint32_t off; uint32_t count; uint32_t ltype; }; struct tdb_chainwalk_ctx { tdb_off_t slow_ptr; bool slow_chase; }; struct tdb_traverse_lock { struct tdb_traverse_lock *next; uint32_t off; uint32_t list; int lock_rw; }; void tdb_chainwalk_init(struct tdb_chainwalk_ctx *ctx, tdb_off_t ptr); bool tdb_chainwalk_check(struct tdb_context *tdb, struct tdb_chainwalk_ctx *ctx, tdb_off_t next_ptr); enum tdb_lock_flags { /* WAIT == F_SETLKW, NOWAIT == F_SETLK */ TDB_LOCK_NOWAIT = 0, TDB_LOCK_WAIT = 1, /* If set, don't log an error on failure. */ TDB_LOCK_PROBE = 2, /* If set, don't actually lock at all.
*/ TDB_LOCK_MARK_ONLY = 4, }; struct tdb_methods { int (*tdb_read)(struct tdb_context *, tdb_off_t , void *, tdb_len_t , int ); int (*tdb_write)(struct tdb_context *, tdb_off_t, const void *, tdb_len_t); void (*next_hash_chain)(struct tdb_context *, uint32_t *); int (*tdb_oob)(struct tdb_context *, tdb_off_t , tdb_len_t, int ); int (*tdb_expand_file)(struct tdb_context *, tdb_off_t , tdb_off_t ); }; struct tdb_mutexes; struct tdb_context { char *name; /* the name of the database */ void *map_ptr; /* where it is currently mapped */ int fd; /* open file descriptor for the database */ tdb_len_t map_size; /* how much space has been mapped */ int read_only; /* opened read-only */ int traverse_read; /* read-only traversal */ int traverse_write; /* read-write traversal */ struct tdb_lock_type allrecord_lock; /* .offset == upgradable */ int num_lockrecs; struct tdb_lock_type *lockrecs; /* only real locks, all with count>0 */ int lockrecs_array_length; tdb_off_t hdr_ofs; /* this is 0 or header.mutex_size */ struct tdb_mutexes *mutexes; /* mmap of the mutex area */ enum TDB_ERROR ecode; /* error code for last tdb error */ uint32_t hash_size; uint32_t feature_flags; uint32_t flags; /* the flags passed to tdb_open */ struct tdb_traverse_lock travlocks; /* current traversal locks */ struct tdb_context *next; /* all tdbs to avoid multiple opens */ dev_t device; /* uniquely identifies this tdb */ ino_t inode; /* uniquely identifies this tdb */ struct tdb_logging_context log; unsigned int (*hash_fn)(TDB_DATA *key); int open_flags; /* flags used in the open - needed by reopen */ const struct tdb_methods *methods; struct tdb_transaction *transaction; int page_size; int max_dead_records; #ifdef TDB_TRACE int tracefd; #endif volatile sig_atomic_t *interrupt_sig_ptr; }; /* internal prototypes */ int tdb_munmap(struct tdb_context *tdb); int tdb_mmap(struct tdb_context *tdb); int tdb_lock(struct tdb_context *tdb, int list, int ltype); int tdb_lock_nonblock(struct tdb_context *tdb, int list, int ltype); int tdb_nest_lock(struct tdb_context *tdb, uint32_t offset, int ltype, enum tdb_lock_flags flags); int tdb_nest_unlock(struct tdb_context *tdb, uint32_t offset, int ltype, bool mark_lock); int tdb_unlock(struct tdb_context *tdb, int list, int ltype); int tdb_brlock(struct tdb_context *tdb, int rw_type, tdb_off_t offset, size_t len, enum tdb_lock_flags flags); int tdb_brunlock(struct tdb_context *tdb, int rw_type, tdb_off_t offset, size_t len); bool tdb_have_extra_locks(struct tdb_context *tdb); void tdb_release_transaction_locks(struct tdb_context *tdb); int tdb_transaction_lock(struct tdb_context *tdb, int ltype, enum tdb_lock_flags lockflags); int tdb_transaction_unlock(struct tdb_context *tdb, int ltype); int tdb_recovery_area(struct tdb_context *tdb, const struct tdb_methods *methods, tdb_off_t *recovery_offset, struct tdb_record *rec); int tdb_allrecord_lock(struct tdb_context *tdb, int ltype, enum tdb_lock_flags flags, bool upgradable); int tdb_allrecord_unlock(struct tdb_context *tdb, int ltype, bool mark_lock); int tdb_allrecord_upgrade(struct tdb_context *tdb); int tdb_write_lock_record(struct tdb_context *tdb, tdb_off_t off); int tdb_write_unlock_record(struct tdb_context *tdb, tdb_off_t off); int tdb_ofs_read(struct tdb_context *tdb, tdb_off_t offset, tdb_off_t *d); int tdb_ofs_write(struct tdb_context *tdb, tdb_off_t offset, tdb_off_t *d); void *tdb_convert(void *buf, uint32_t size); int tdb_free(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec); tdb_off_t tdb_allocate(struct 
tdb_context *tdb, int hash, tdb_len_t length, struct tdb_record *rec); int _tdb_oob(struct tdb_context *tdb, tdb_off_t off, tdb_len_t len, int probe); static inline int tdb_oob( struct tdb_context *tdb, tdb_off_t off, tdb_len_t len, int probe) { if (likely((off + len >= off) && (off + len <= tdb->map_size))) { return 0; } return _tdb_oob(tdb, off, len, probe); } int tdb_ofs_read(struct tdb_context *tdb, tdb_off_t offset, tdb_off_t *d); int tdb_ofs_write(struct tdb_context *tdb, tdb_off_t offset, tdb_off_t *d); int tdb_lock_record(struct tdb_context *tdb, tdb_off_t off); int tdb_unlock_record(struct tdb_context *tdb, tdb_off_t off); bool tdb_needs_recovery(struct tdb_context *tdb); int tdb_rec_read(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec); int tdb_rec_write(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec); unsigned char *tdb_alloc_read(struct tdb_context *tdb, tdb_off_t offset, tdb_len_t len); int tdb_parse_data(struct tdb_context *tdb, TDB_DATA key, tdb_off_t offset, tdb_len_t len, int (*parser)(TDB_DATA key, TDB_DATA data, void *private_data), void *private_data); tdb_off_t tdb_find_lock_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash, int locktype, struct tdb_record *rec); tdb_off_t tdb_find_dead(struct tdb_context *tdb, uint32_t hash, struct tdb_record *r, tdb_len_t length, tdb_off_t *p_last_ptr); int tdb_trim_dead(struct tdb_context *tdb, uint32_t hash); void tdb_io_init(struct tdb_context *tdb); int tdb_expand(struct tdb_context *tdb, tdb_off_t size); tdb_off_t tdb_expand_adjust(tdb_off_t map_size, tdb_off_t size, int page_size); int tdb_rec_free_read(struct tdb_context *tdb, tdb_off_t off, struct tdb_record *rec); bool tdb_write_all(int fd, const void *buf, size_t count); int tdb_transaction_recover(struct tdb_context *tdb); void tdb_header_hash(struct tdb_context *tdb, uint32_t *magic1_hash, uint32_t *magic2_hash); unsigned int tdb_old_hash(TDB_DATA *key); size_t tdb_dead_space(struct tdb_context *tdb, tdb_off_t off); bool tdb_add_off_t(tdb_off_t a, tdb_off_t b, tdb_off_t *pret); /* tdb_off_t and tdb_len_t right now are both uint32_t */ #define tdb_add_len_t tdb_add_off_t size_t tdb_mutex_size(struct tdb_context *tdb); bool tdb_have_mutexes(struct tdb_context *tdb); int tdb_mutex_init(struct tdb_context *tdb); int tdb_mutex_mmap(struct tdb_context *tdb); int tdb_mutex_munmap(struct tdb_context *tdb); bool tdb_mutex_lock(struct tdb_context *tdb, int rw, off_t off, off_t len, bool waitflag, int *pret); bool tdb_mutex_unlock(struct tdb_context *tdb, int rw, off_t off, off_t len, int *pret); int tdb_mutex_allrecord_lock(struct tdb_context *tdb, int ltype, enum tdb_lock_flags flags); int tdb_mutex_allrecord_unlock(struct tdb_context *tdb); int tdb_mutex_allrecord_upgrade(struct tdb_context *tdb); void tdb_mutex_allrecord_downgrade(struct tdb_context *tdb); #endif /* TDB_PRIVATE_H */ tdb-1.4.2/common/transaction.c0000660000000000000000000011616513527011454016220 0ustar rootroot00000000000000 /* Unix SMB/CIFS implementation. trivial database library Copyright (C) Andrew Tridgell 2005 ** NOTE! The following LGPL license applies to the tdb ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. 
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "tdb_private.h" /* transaction design: - only allow a single transaction at a time per database. This makes using the transaction API simpler, as otherwise the caller would have to cope with temporary failures in transactions that conflict with other current transactions - keep the transaction recovery information in the same file as the database, using a special 'transaction recovery' record pointed at by the header. This removes the need for extra journal files as used by some other databases - dynamically allocate the transaction recovery record, re-using it for subsequent transactions. If a larger record is needed then tdb_free() the old record to place it on the normal tdb freelist before allocating the new record - during transactions, keep a linked list of all writes that have been performed by intercepting all tdb_write() calls. The hooked transaction versions of tdb_read() and tdb_write() check this linked list and try to use the elements of the list in preference to the real database. - don't allow any locks to be held when a transaction starts, otherwise we can end up with deadlock (plus lack of lock nesting in posix locks would mean the lock is lost) - if the caller gains a lock during the transaction but doesn't release it then fail the commit - allow for nested calls to tdb_transaction_start(), re-using the existing transaction record. If the inner transaction is cancelled then a subsequent commit will fail - keep a mirrored copy of the tdb hash chain heads to allow for the fast hash heads scan on traverse, updating the mirrored copy in the transaction version of tdb_write - allow callers to mix transaction and non-transaction use of tdb, although once a transaction is started then an exclusive lock is gained until the transaction is committed or cancelled - the commit strategy involves first saving away all modified data into a linearised buffer in the transaction recovery area, then marking the transaction recovery area with a magic value to indicate a valid recovery record. In total 4 fsync/msync calls are needed per commit to prevent race conditions. It might be possible to reduce this to 3 or even 2 with some more work. - check for a valid recovery record on open of the tdb, while the open lock is held. Automatically recover from the transaction recovery area if needed, then continue with the open as usual. This allows for smooth crash recovery with no administrator intervention. - if TDB_NOSYNC is passed to flags in tdb_open then transactions are still available, but no fsync/msync calls are made. This means we are still proof against a process dying during transaction commit, but not against machine reboot. - if TDB_ALLOW_NESTING is passed to flags in tdb open, or added using tdb_add_flags() transaction nesting is enabled. It resets the TDB_DISALLOW_NESTING flag, as both cannot be used together. The default is that transaction nesting is allowed. Note: this default may change in future versions of tdb. Beware: when transactions are nested, a transaction successfully completed with tdb_transaction_commit() can be silently unrolled later.
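     (Example: with nesting allowed, an inner tdb_transaction_commit()
     merely decrements the nesting count; if the outer transaction is
     later cancelled, the inner "committed" changes are rolled back
     with it.)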
- if TDB_DISALLOW_NESTING is passed to flags in tdb open, or added using tdb_add_flags() transaction nesting is disabled. It resets the TDB_ALLOW_NESTING flag, as both cannot be used together. An attempt to create a nested transaction will fail with TDB_ERR_NESTING. The default is that transaction nesting is allowed. Note: this default may change in future versions of tdb. */ /* hold the context of any current transaction */ struct tdb_transaction { /* we keep a mirrored copy of the tdb hash heads here so tdb_next_hash_chain() can operate efficiently */ uint32_t *hash_heads; /* the original io methods - used to do IOs to the real db */ const struct tdb_methods *io_methods; /* the list of transaction blocks. When a block is first written to, it gets created in this list */ uint8_t **blocks; uint32_t num_blocks; uint32_t block_size; /* bytes in each block */ uint32_t last_block_size; /* number of valid bytes in the last block */ /* non-zero when an internal transaction error has occurred. All write operations will then fail until the transaction is ended */ int transaction_error; /* when inside a transaction we need to keep track of any nested tdb_transaction_start() calls, as these are allowed, but don't create a new transaction */ int nesting; /* set when a prepare has already occurred */ bool prepared; tdb_off_t magic_offset; /* old file size before transaction */ tdb_len_t old_map_size; /* did we expand in this transaction */ bool expanded; }; /* read while in a transaction. We need to check first if the data is in our list of transaction elements, then if not do a real read */ static int transaction_read(struct tdb_context *tdb, tdb_off_t off, void *buf, tdb_len_t len, int cv) { uint32_t blk; /* break it down into block sized ops */ while (len + (off % tdb->transaction->block_size) > tdb->transaction->block_size) { tdb_len_t len2 = tdb->transaction->block_size - (off % tdb->transaction->block_size); if (transaction_read(tdb, off, buf, len2, cv) != 0) { return -1; } len -= len2; off += len2; buf = (void *)(len2 + (char *)buf); } if (len == 0) { return 0; } blk = off / tdb->transaction->block_size; /* see if we have it in the block list */ if (tdb->transaction->num_blocks <= blk || tdb->transaction->blocks[blk] == NULL) { /* nope, do a real read */ if (tdb->transaction->io_methods->tdb_read(tdb, off, buf, len, cv) != 0) { goto fail; } return 0; } /* it is in the block list.
Now check for the last block */ if (blk == tdb->transaction->num_blocks-1) { if (len > tdb->transaction->last_block_size) { goto fail; } } /* now copy it out of this block */ memcpy(buf, tdb->transaction->blocks[blk] + (off % tdb->transaction->block_size), len); if (cv) { tdb_convert(buf, len); } return 0; fail: TDB_LOG((tdb, TDB_DEBUG_FATAL, "transaction_read: failed at off=%u len=%u\n", off, len)); tdb->ecode = TDB_ERR_IO; tdb->transaction->transaction_error = 1; return -1; } /* write while in a transaction */ static int transaction_write(struct tdb_context *tdb, tdb_off_t off, const void *buf, tdb_len_t len) { uint32_t blk; if (buf == NULL) { return -1; } /* Only a commit is allowed on a prepared transaction */ if (tdb->transaction->prepared) { tdb->ecode = TDB_ERR_EINVAL; TDB_LOG((tdb, TDB_DEBUG_FATAL, "transaction_write: transaction already prepared, write not allowed\n")); tdb->transaction->transaction_error = 1; return -1; } /* if the write is to a hash head, then update the transaction hash heads */ if (len == sizeof(tdb_off_t) && off >= FREELIST_TOP && off < FREELIST_TOP+TDB_HASHTABLE_SIZE(tdb)) { uint32_t chain = (off-FREELIST_TOP) / sizeof(tdb_off_t); memcpy(&tdb->transaction->hash_heads[chain], buf, len); } /* break it up into block sized chunks */ while (len + (off % tdb->transaction->block_size) > tdb->transaction->block_size) { tdb_len_t len2 = tdb->transaction->block_size - (off % tdb->transaction->block_size); if (transaction_write(tdb, off, buf, len2) != 0) { return -1; } len -= len2; off += len2; buf = (const void *)(len2 + (const char *)buf); } if (len == 0) { return 0; } blk = off / tdb->transaction->block_size; off = off % tdb->transaction->block_size; if (tdb->transaction->num_blocks <= blk) { uint8_t **new_blocks; /* expand the blocks array */ new_blocks = (uint8_t **)realloc(tdb->transaction->blocks, (blk+1)*sizeof(uint8_t *)); if (new_blocks == NULL) { tdb->ecode = TDB_ERR_OOM; goto fail; } memset(&new_blocks[tdb->transaction->num_blocks], 0, (1+(blk - tdb->transaction->num_blocks))*sizeof(uint8_t *)); tdb->transaction->blocks = new_blocks; tdb->transaction->num_blocks = blk+1; tdb->transaction->last_block_size = 0; } /* allocate and fill a block? 
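	   (Blocks are created lazily: the first write into a block
	   allocates it and, where the block overlaps the
	   pre-transaction file, seeds it from the old on-disk
	   contents, so a partial-block write leaves the remaining
	   bytes consistent.)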
*/ if (tdb->transaction->blocks[blk] == NULL) { tdb->transaction->blocks[blk] = (uint8_t *)calloc(tdb->transaction->block_size, 1); if (tdb->transaction->blocks[blk] == NULL) { tdb->ecode = TDB_ERR_OOM; tdb->transaction->transaction_error = 1; return -1; } if (tdb->transaction->old_map_size > blk * tdb->transaction->block_size) { tdb_len_t len2 = tdb->transaction->block_size; if (len2 + (blk * tdb->transaction->block_size) > tdb->transaction->old_map_size) { len2 = tdb->transaction->old_map_size - (blk * tdb->transaction->block_size); } if (tdb->transaction->io_methods->tdb_read(tdb, blk * tdb->transaction->block_size, tdb->transaction->blocks[blk], len2, 0) != 0) { SAFE_FREE(tdb->transaction->blocks[blk]); tdb->ecode = TDB_ERR_IO; goto fail; } if (blk == tdb->transaction->num_blocks-1) { tdb->transaction->last_block_size = len2; } } } /* overwrite part of an existing block */ memcpy(tdb->transaction->blocks[blk] + off, buf, len); if (blk == tdb->transaction->num_blocks-1) { if (len + off > tdb->transaction->last_block_size) { tdb->transaction->last_block_size = len + off; } } return 0; fail: TDB_LOG((tdb, TDB_DEBUG_FATAL, "transaction_write: failed at off=%u len=%u\n", (blk*tdb->transaction->block_size) + off, len)); tdb->transaction->transaction_error = 1; return -1; } /* write while in a transaction - this variant never expands the transaction blocks, it only updates existing blocks. This means it cannot change the recovery size */ static int transaction_write_existing(struct tdb_context *tdb, tdb_off_t off, const void *buf, tdb_len_t len) { uint32_t blk; /* break it up into block sized chunks */ while (len + (off % tdb->transaction->block_size) > tdb->transaction->block_size) { tdb_len_t len2 = tdb->transaction->block_size - (off % tdb->transaction->block_size); if (transaction_write_existing(tdb, off, buf, len2) != 0) { return -1; } len -= len2; off += len2; if (buf != NULL) { buf = (const void *)(len2 + (const char *)buf); } } if (len == 0 || buf == NULL) { return 0; } blk = off / tdb->transaction->block_size; off = off % tdb->transaction->block_size; if (tdb->transaction->num_blocks <= blk || tdb->transaction->blocks[blk] == NULL) { return 0; } if (blk == tdb->transaction->num_blocks-1 && off + len > tdb->transaction->last_block_size) { if (off >= tdb->transaction->last_block_size) { return 0; } len = tdb->transaction->last_block_size - off; } /* overwrite part of an existing block */ memcpy(tdb->transaction->blocks[blk] + off, buf, len); return 0; } /* accelerated hash chain head search, using the cached hash heads */ static void transaction_next_hash_chain(struct tdb_context *tdb, uint32_t *chain) { uint32_t h = *chain; for (;h < tdb->hash_size;h++) { /* the +1 takes account of the freelist */ if (0 != tdb->transaction->hash_heads[h+1]) { break; } } (*chain) = h; } /* out of bounds check during a transaction */ static int transaction_oob(struct tdb_context *tdb, tdb_off_t off, tdb_len_t len, int probe) { /* * This duplicates functionality from tdb_oob(). Don't remove: * we still have direct callers of tdb->methods->tdb_oob() * inside transaction.c. */ if (off + len >= off && off + len <= tdb->map_size) { return 0; } tdb->ecode = TDB_ERR_IO; return -1; } /* transaction version of tdb_expand(). 
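   It just redirects a zero-fill into transaction_write(), so the
   expansion is journalled like any other write. For contrast, the
   caller-side shape of the public transaction API (hypothetical
   helper under a hypothetical TDB_USAGE_EXAMPLES guard; error
   handling condensed):
*/
#ifdef TDB_USAGE_EXAMPLES
static int example_atomic_update(struct tdb_context *tdb,
				 TDB_DATA key, TDB_DATA val)
{
	if (tdb_transaction_start(tdb) != 0) {
		return -1;
	}
	if (tdb_store(tdb, key, val, TDB_REPLACE) != 0) {
		tdb_transaction_cancel(tdb);
		return -1;
	}
	/* everything above becomes durable, or is rolled back, as a unit */
	return tdb_transaction_commit(tdb);
}
#endif /* TDB_USAGE_EXAMPLES */
/* transaction version of tdb_expand():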
*/ static int transaction_expand_file(struct tdb_context *tdb, tdb_off_t size, tdb_off_t addition) { const char buf_zero[8192] = {0}; size_t buf_len = sizeof(buf_zero); while (addition > 0) { size_t n = MIN(addition, buf_len); int ret; ret = transaction_write(tdb, size, buf_zero, n); if (ret != 0) { return ret; } addition -= n; size += n; } tdb->transaction->expanded = true; return 0; } static const struct tdb_methods transaction_methods = { transaction_read, transaction_write, transaction_next_hash_chain, transaction_oob, transaction_expand_file, }; /* * Is a transaction currently active on this context? * */ _PUBLIC_ bool tdb_transaction_active(struct tdb_context *tdb) { return (tdb->transaction != NULL); } /* start a tdb transaction. No token is returned, as only a single transaction is allowed to be pending per tdb_context */ static int _tdb_transaction_start(struct tdb_context *tdb, enum tdb_lock_flags lockflags) { /* some sanity checks */ if (tdb->read_only || (tdb->flags & TDB_INTERNAL) || tdb->traverse_read) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_start: cannot start a transaction on a read-only or internal db\n")); tdb->ecode = TDB_ERR_EINVAL; return -1; } /* cope with nested tdb_transaction_start() calls */ if (tdb->transaction != NULL) { if (!(tdb->flags & TDB_ALLOW_NESTING)) { tdb->ecode = TDB_ERR_NESTING; return -1; } tdb->transaction->nesting++; TDB_LOG((tdb, TDB_DEBUG_TRACE, "tdb_transaction_start: nesting %d\n", tdb->transaction->nesting)); return 0; } if (tdb_have_extra_locks(tdb)) { /* the caller must not have any locks when starting a transaction as otherwise we'll be screwed by lack of nested locks in posix */ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_start: cannot start a transaction with locks held\n")); tdb->ecode = TDB_ERR_LOCK; return -1; } if (tdb->travlocks.next != NULL) { /* you cannot use transactions inside a traverse (although you can use traverse inside a transaction) as otherwise you can end up with deadlock */ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_start: cannot start a transaction within a traverse\n")); tdb->ecode = TDB_ERR_LOCK; return -1; } tdb->transaction = (struct tdb_transaction *) calloc(sizeof(struct tdb_transaction), 1); if (tdb->transaction == NULL) { tdb->ecode = TDB_ERR_OOM; return -1; } /* a page at a time seems like a reasonable compromise between compactness and efficiency */ tdb->transaction->block_size = tdb->page_size; /* get the transaction write lock. This is a blocking lock. As discussed with Volker, there are a number of ways we could make this async, which we will probably do in the future */ if (tdb_transaction_lock(tdb, F_WRLCK, lockflags) == -1) { SAFE_FREE(tdb->transaction->blocks); SAFE_FREE(tdb->transaction); if ((lockflags & TDB_LOCK_WAIT) == 0) { tdb->ecode = TDB_ERR_NOLOCK; } else { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_start: " "failed to get transaction lock\n")); } return -1; } /* get a read lock from the freelist to the end of file. 
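   (Taking only a read lock here keeps the database readable by other
   processes while the transaction is being built up in memory.)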
This is upgraded to a write lock during the commit */ if (tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, true) == -1) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_start: failed to get hash locks\n")); goto fail_allrecord_lock; } /* setup a copy of the hash table heads so the hash scan in traverse can be fast */ tdb->transaction->hash_heads = (uint32_t *) calloc(tdb->hash_size+1, sizeof(uint32_t)); if (tdb->transaction->hash_heads == NULL) { tdb->ecode = TDB_ERR_OOM; goto fail; } if (tdb->methods->tdb_read(tdb, FREELIST_TOP, tdb->transaction->hash_heads, TDB_HASHTABLE_SIZE(tdb), 0) != 0) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_start: failed to read hash heads\n")); tdb->ecode = TDB_ERR_IO; goto fail; } /* make sure we know about any file expansions already done by anyone else */ tdb_oob(tdb, tdb->map_size, 1, 1); tdb->transaction->old_map_size = tdb->map_size; /* finally hook the io methods, replacing them with transaction specific methods */ tdb->transaction->io_methods = tdb->methods; tdb->methods = &transaction_methods; /* Trace at the end, so we get sequence number correct. */ tdb_trace(tdb, "tdb_transaction_start"); return 0; fail: tdb_allrecord_unlock(tdb, F_RDLCK, false); fail_allrecord_lock: tdb_transaction_unlock(tdb, F_WRLCK); SAFE_FREE(tdb->transaction->blocks); SAFE_FREE(tdb->transaction->hash_heads); SAFE_FREE(tdb->transaction); return -1; } _PUBLIC_ int tdb_transaction_start(struct tdb_context *tdb) { return _tdb_transaction_start(tdb, TDB_LOCK_WAIT); } _PUBLIC_ int tdb_transaction_start_nonblock(struct tdb_context *tdb) { return _tdb_transaction_start(tdb, TDB_LOCK_NOWAIT|TDB_LOCK_PROBE); } /* sync to disk */ static int transaction_sync(struct tdb_context *tdb, tdb_off_t offset, tdb_len_t length) { if (tdb->flags & TDB_NOSYNC) { return 0; } #ifdef HAVE_FDATASYNC if (fdatasync(tdb->fd) != 0) { #else if (fsync(tdb->fd) != 0) { #endif tdb->ecode = TDB_ERR_IO; TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction: fsync failed\n")); return -1; } #ifdef HAVE_MMAP if (tdb->map_ptr) { tdb_off_t moffset = offset & ~(tdb->page_size-1); if (msync(moffset + (char *)tdb->map_ptr, length + (offset - moffset), MS_SYNC) != 0) { tdb->ecode = TDB_ERR_IO; TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction: msync failed - %s\n", strerror(errno))); return -1; } } #endif return 0; } static int _tdb_transaction_cancel(struct tdb_context *tdb) { uint32_t i; int ret = 0; if (tdb->transaction == NULL) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_cancel: no transaction\n")); return -1; } if (tdb->transaction->nesting != 0) { tdb->transaction->transaction_error = 1; tdb->transaction->nesting--; return 0; } tdb->map_size = tdb->transaction->old_map_size; /* free all the transaction blocks */ for (i=0;i<tdb->transaction->num_blocks;i++) { if ((tdb->transaction->blocks != NULL) && tdb->transaction->blocks[i] != NULL) { free(tdb->transaction->blocks[i]); } } SAFE_FREE(tdb->transaction->blocks); if (tdb->transaction->magic_offset) { const struct tdb_methods *methods = tdb->transaction->io_methods; const uint32_t invalid = TDB_RECOVERY_INVALID_MAGIC; /* remove the recovery marker */ if (methods->tdb_write(tdb, tdb->transaction->magic_offset, &invalid, 4) == -1 || transaction_sync(tdb, tdb->transaction->magic_offset, 4) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_cancel: failed to remove recovery magic\n")); ret = -1; } } /* This also removes the OPEN_LOCK, if we have it.
*/ tdb_release_transaction_locks(tdb); /* restore the normal io methods */ tdb->methods = tdb->transaction->io_methods; SAFE_FREE(tdb->transaction->hash_heads); SAFE_FREE(tdb->transaction); return ret; } /* cancel the current transaction */ _PUBLIC_ int tdb_transaction_cancel(struct tdb_context *tdb) { tdb_trace(tdb, "tdb_transaction_cancel"); return _tdb_transaction_cancel(tdb); } /* work out how much space the linearised recovery data will consume */ static bool tdb_recovery_size(struct tdb_context *tdb, tdb_len_t *result) { tdb_len_t recovery_size = 0; uint32_t i; recovery_size = sizeof(uint32_t); for (i=0;i<tdb->transaction->num_blocks;i++) { tdb_len_t block_size; if (i * tdb->transaction->block_size >= tdb->transaction->old_map_size) { break; } if (tdb->transaction->blocks[i] == NULL) { continue; } if (!tdb_add_len_t(recovery_size, 2*sizeof(tdb_off_t), &recovery_size)) { return false; } if (i == tdb->transaction->num_blocks-1) { block_size = tdb->transaction->last_block_size; } else { block_size = tdb->transaction->block_size; } if (!tdb_add_len_t(recovery_size, block_size, &recovery_size)) { return false; } } *result = recovery_size; return true; } int tdb_recovery_area(struct tdb_context *tdb, const struct tdb_methods *methods, tdb_off_t *recovery_offset, struct tdb_record *rec) { int ret; if (tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, recovery_offset) == -1) { return -1; } if (*recovery_offset == 0) { rec->rec_len = 0; return 0; } if (methods->tdb_read(tdb, *recovery_offset, rec, sizeof(*rec), DOCONV()) == -1) { return -1; } /* ignore invalid recovery regions: can happen in crash */ if (rec->magic != TDB_RECOVERY_MAGIC && rec->magic != TDB_RECOVERY_INVALID_MAGIC) { *recovery_offset = 0; rec->rec_len = 0; } ret = methods->tdb_oob(tdb, *recovery_offset, rec->rec_len, 1); if (ret == -1) { *recovery_offset = 0; rec->rec_len = 0; } return 0; } /* allocate the recovery area, or use an existing recovery area if it is large enough */ static int tdb_recovery_allocate(struct tdb_context *tdb, tdb_len_t *recovery_size, tdb_off_t *recovery_offset, tdb_len_t *recovery_max_size) { struct tdb_record rec; const struct tdb_methods *methods = tdb->transaction->io_methods; tdb_off_t recovery_head, new_end; if (tdb_recovery_area(tdb, methods, &recovery_head, &rec) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to read recovery head\n")); return -1; } if (!tdb_recovery_size(tdb, recovery_size)) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: " "overflow recovery size\n")); return -1; } /* Existing recovery area? */ if (recovery_head != 0 && *recovery_size <= rec.rec_len) { /* it fits in the existing area */ *recovery_max_size = rec.rec_len; *recovery_offset = recovery_head; return 0; } /* If recovery area in middle of file, we need a new one. */ if (recovery_head == 0 || recovery_head + sizeof(rec) + rec.rec_len != tdb->map_size) { /* we need to free up the old recovery area, then allocate a new one at the end of the file.
Note that we cannot use tdb_allocate() to allocate the new one as that might return us an area that is being currently used (as of the start of the transaction) */ if (recovery_head) { if (tdb_free(tdb, recovery_head, &rec) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to" " free previous recovery area\n")); return -1; } /* the tdb_free() call might have increased * the recovery size */ if (!tdb_recovery_size(tdb, recovery_size)) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: " "overflow recovery size\n")); return -1; } } /* New head will be at end of file. */ recovery_head = tdb->map_size; } /* Now we know where it will be. */ *recovery_offset = recovery_head; /* Expand by more than we need, so we don't do it often. */ *recovery_max_size = tdb_expand_adjust(tdb->map_size, *recovery_size, tdb->page_size) - sizeof(rec); if (!tdb_add_off_t(recovery_head, sizeof(rec), &new_end) || !tdb_add_off_t(new_end, *recovery_max_size, &new_end)) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: " "overflow recovery area\n")); return -1; } if (methods->tdb_expand_file(tdb, tdb->transaction->old_map_size, new_end - tdb->transaction->old_map_size) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to create recovery area\n")); return -1; } /* remap the file (if using mmap) */ methods->tdb_oob(tdb, tdb->map_size, 1, 1); /* we have to reset the old map size so that we don't try to expand the file again in the transaction commit, which would destroy the recovery area */ tdb->transaction->old_map_size = tdb->map_size; /* write the recovery header offset and sync - we can sync without a race here as the magic ptr in the recovery record has not been set */ CONVERT(recovery_head); if (methods->tdb_write(tdb, TDB_RECOVERY_HEAD, &recovery_head, sizeof(tdb_off_t)) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to write recovery head\n")); return -1; } if (transaction_write_existing(tdb, TDB_RECOVERY_HEAD, &recovery_head, sizeof(tdb_off_t)) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to write recovery head\n")); return -1; } return 0; } /* setup the recovery data that will be used on a crash during commit */ static int transaction_setup_recovery(struct tdb_context *tdb, tdb_off_t *magic_offset) { tdb_len_t recovery_size; unsigned char *data, *p; const struct tdb_methods *methods = tdb->transaction->io_methods; struct tdb_record *rec; tdb_off_t recovery_offset, recovery_max_size; tdb_off_t old_map_size = tdb->transaction->old_map_size; uint32_t magic, tailer; uint32_t i; /* check that the recovery area has enough space */ if (tdb_recovery_allocate(tdb, &recovery_size, &recovery_offset, &recovery_max_size) == -1) { return -1; } rec = malloc(recovery_size + sizeof(*rec)); if (rec == NULL) { tdb->ecode = TDB_ERR_OOM; return -1; } memset(rec, 0, sizeof(*rec)); rec->magic = TDB_RECOVERY_INVALID_MAGIC; rec->data_len = recovery_size; rec->rec_len = recovery_max_size; rec->key_len = old_map_size; CONVERT(*rec); data = (unsigned char *)rec; /* build the recovery data into a single blob to allow us to do a single large write, which should be more efficient */ p = data + sizeof(*rec); for (i=0;i<tdb->transaction->num_blocks;i++) { tdb_off_t offset; tdb_len_t length; if (tdb->transaction->blocks[i] == NULL) { continue; } offset = i * tdb->transaction->block_size; length = tdb->transaction->block_size; if (i == tdb->transaction->num_blocks-1) { length = tdb->transaction->last_block_size; } if (offset >= old_map_size) {
continue; } if (offset + length > tdb->transaction->old_map_size) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_setup_recovery: transaction data over new region boundary\n")); free(data); tdb->ecode = TDB_ERR_CORRUPT; return -1; } memcpy(p, &offset, 4); memcpy(p+4, &length, 4); if (DOCONV()) { tdb_convert(p, 8); } /* the recovery area contains the old data, not the new data, so we have to call the original tdb_read method to get it */ if (methods->tdb_read(tdb, offset, p + 8, length, 0) != 0) { free(data); tdb->ecode = TDB_ERR_IO; return -1; } p += 8 + length; } /* and the tailer */ tailer = sizeof(*rec) + recovery_max_size; memcpy(p, &tailer, 4); if (DOCONV()) { tdb_convert(p, 4); } /* write the recovery data to the recovery area */ if (methods->tdb_write(tdb, recovery_offset, data, sizeof(*rec) + recovery_size) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_setup_recovery: failed to write recovery data\n")); free(data); tdb->ecode = TDB_ERR_IO; return -1; } if (transaction_write_existing(tdb, recovery_offset, data, sizeof(*rec) + recovery_size) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_setup_recovery: failed to write secondary recovery data\n")); free(data); tdb->ecode = TDB_ERR_IO; return -1; } /* as we don't have ordered writes, we have to sync the recovery data before we update the magic to indicate that the recovery data is present */ if (transaction_sync(tdb, recovery_offset, sizeof(*rec) + recovery_size) == -1) { free(data); return -1; } free(data); magic = TDB_RECOVERY_MAGIC; CONVERT(magic); *magic_offset = recovery_offset + offsetof(struct tdb_record, magic); if (methods->tdb_write(tdb, *magic_offset, &magic, sizeof(magic)) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_setup_recovery: failed to write recovery magic\n")); tdb->ecode = TDB_ERR_IO; return -1; } if (transaction_write_existing(tdb, *magic_offset, &magic, sizeof(magic)) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_setup_recovery: failed to write secondary recovery magic\n")); tdb->ecode = TDB_ERR_IO; return -1; } /* ensure the recovery magic marker is on disk */ if (transaction_sync(tdb, *magic_offset, sizeof(magic)) == -1) { return -1; } return 0; } static int _tdb_transaction_prepare_commit(struct tdb_context *tdb) { const struct tdb_methods *methods; if (tdb->transaction == NULL) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_prepare_commit: no transaction\n")); return -1; } if (tdb->transaction->prepared) { tdb->ecode = TDB_ERR_EINVAL; _tdb_transaction_cancel(tdb); TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_prepare_commit: transaction already prepared\n")); return -1; } if (tdb->transaction->transaction_error) { tdb->ecode = TDB_ERR_IO; _tdb_transaction_cancel(tdb); TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_prepare_commit: transaction error pending\n")); return -1; } if (tdb->transaction->nesting != 0) { return 0; } /* check for a null transaction */ if (tdb->transaction->blocks == NULL) { return 0; } methods = tdb->transaction->io_methods; /* if there are any locks pending then the caller has not nested their locks properly, so fail the transaction */ if (tdb_have_extra_locks(tdb)) { tdb->ecode = TDB_ERR_LOCK; TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_prepare_commit: locks pending on commit\n")); _tdb_transaction_cancel(tdb); return -1; } /* upgrade the main transaction lock region to a write lock */ if (tdb_allrecord_upgrade(tdb) == -1) { if (tdb->ecode == TDB_ERR_RDONLY && tdb->read_only) { TDB_LOG((tdb, TDB_DEBUG_ERROR, 
"tdb_transaction_prepare_commit: " "failed to upgrade hash locks: " "database is read only\n")); } else if (tdb->ecode == TDB_ERR_RDONLY && tdb->traverse_read) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_prepare_commit: " "failed to upgrade hash locks: " "a database traverse is in progress\n")); } else { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_prepare_commit: " "failed to upgrade hash locks: %s\n", tdb_errorstr(tdb))); } _tdb_transaction_cancel(tdb); return -1; } /* get the open lock - this prevents new users attaching to the database during the commit */ if (tdb_nest_lock(tdb, OPEN_LOCK, F_WRLCK, TDB_LOCK_WAIT) == -1) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_prepare_commit: failed to get open lock\n")); _tdb_transaction_cancel(tdb); return -1; } /* write the recovery data to the end of the file */ if (transaction_setup_recovery(tdb, &tdb->transaction->magic_offset) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_prepare_commit: failed to setup recovery data\n")); _tdb_transaction_cancel(tdb); return -1; } tdb->transaction->prepared = true; /* expand the file to the new size if needed */ if (tdb->map_size != tdb->transaction->old_map_size) { if (methods->tdb_expand_file(tdb, tdb->transaction->old_map_size, tdb->map_size - tdb->transaction->old_map_size) == -1) { tdb->ecode = TDB_ERR_IO; TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_prepare_commit: expansion failed\n")); _tdb_transaction_cancel(tdb); return -1; } tdb->map_size = tdb->transaction->old_map_size; methods->tdb_oob(tdb, tdb->map_size, 1, 1); } /* Keep the open lock until the actual commit */ return 0; } /* prepare to commit the current transaction */ _PUBLIC_ int tdb_transaction_prepare_commit(struct tdb_context *tdb) { tdb_trace(tdb, "tdb_transaction_prepare_commit"); return _tdb_transaction_prepare_commit(tdb); } /* A repack is worthwhile if the largest is less than half total free. 
*/ static bool repack_worthwhile(struct tdb_context *tdb) { tdb_off_t ptr; struct tdb_record rec; tdb_len_t total = 0, largest = 0; if (tdb_ofs_read(tdb, FREELIST_TOP, &ptr) == -1) { return false; } while (ptr != 0 && tdb_rec_free_read(tdb, ptr, &rec) == 0) { total += rec.rec_len; if (rec.rec_len > largest) { largest = rec.rec_len; } ptr = rec.next; } return total > largest * 2; } /* commit the current transaction */ _PUBLIC_ int tdb_transaction_commit(struct tdb_context *tdb) { const struct tdb_methods *methods; uint32_t i; bool need_repack = false; if (tdb->transaction == NULL) { TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_commit: no transaction\n")); return -1; } tdb_trace(tdb, "tdb_transaction_commit"); if (tdb->transaction->transaction_error) { tdb->ecode = TDB_ERR_IO; _tdb_transaction_cancel(tdb); TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_commit: transaction error pending\n")); return -1; } if (tdb->transaction->nesting != 0) { tdb->transaction->nesting--; return 0; } /* check for a null transaction */ if (tdb->transaction->blocks == NULL) { _tdb_transaction_cancel(tdb); return 0; } if (!tdb->transaction->prepared) { int ret = _tdb_transaction_prepare_commit(tdb); if (ret) return ret; } methods = tdb->transaction->io_methods; /* perform all the writes */ for (i=0;itransaction->num_blocks;i++) { tdb_off_t offset; tdb_len_t length; if (tdb->transaction->blocks[i] == NULL) { continue; } offset = i * tdb->transaction->block_size; length = tdb->transaction->block_size; if (i == tdb->transaction->num_blocks-1) { length = tdb->transaction->last_block_size; } if (methods->tdb_write(tdb, offset, tdb->transaction->blocks[i], length) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_commit: write failed during commit\n")); /* we've overwritten part of the data and possibly expanded the file, so we need to run the crash recovery code */ tdb->methods = methods; tdb_transaction_recover(tdb); _tdb_transaction_cancel(tdb); TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_commit: write failed\n")); return -1; } SAFE_FREE(tdb->transaction->blocks[i]); } /* Do this before we drop lock or blocks. */ if (tdb->transaction->expanded) { need_repack = repack_worthwhile(tdb); } SAFE_FREE(tdb->transaction->blocks); tdb->transaction->num_blocks = 0; /* ensure the new data is on disk */ if (transaction_sync(tdb, 0, tdb->map_size) == -1) { return -1; } /* TODO: maybe write to some dummy hdr field, or write to magic offset without mmap, before the last sync, instead of the utime() call */ /* on some systems (like Linux 2.6.x) changes via mmap/msync don't change the mtime of the file, this means the file may not be backed up (as tdb rounding to block sizes means that file size changes are quite rare too). The following forces mtime changes when a transaction completes */ #ifdef HAVE_UTIME utime(tdb->name, NULL); #endif /* use a transaction cancel to free memory and remove the transaction locks */ _tdb_transaction_cancel(tdb); if (need_repack) { int ret = tdb_repack(tdb); if (ret != 0) { TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Failed to repack database (not fatal)\n")); } /* * Ignore the error. * * Why? * * We just committed to the DB above, so anything * written during the transaction is committed, the * caller needs to know that the long-term state was * successfully modified. * * tdb_repack is an optimization that can fail for * reasons like lock ordering and we cannot recover * the transaction lock at this point, having released * it above. 
* * If we return a failure the caller thinks the * transaction was rolled back. */ } return 0; } /* recover from an aborted transaction. Must be called with exclusive database write access already established (including the open lock to prevent new processes attaching) */ int tdb_transaction_recover(struct tdb_context *tdb) { tdb_off_t recovery_head, recovery_eof; unsigned char *data, *p; uint32_t zero = 0; struct tdb_record rec; /* find the recovery area */ if (tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, &recovery_head) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to read recovery head\n")); tdb->ecode = TDB_ERR_IO; return -1; } if (recovery_head == 0) { /* we have never allocated a recovery record */ return 0; } /* read the recovery record */ if (tdb->methods->tdb_read(tdb, recovery_head, &rec, sizeof(rec), DOCONV()) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to read recovery record\n")); tdb->ecode = TDB_ERR_IO; return -1; } if (rec.magic != TDB_RECOVERY_MAGIC) { /* there is no valid recovery data */ return 0; } if (tdb->read_only) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: attempt to recover read only database\n")); tdb->ecode = TDB_ERR_CORRUPT; return -1; } recovery_eof = rec.key_len; data = (unsigned char *)malloc(rec.data_len); if (data == NULL) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to allocate recovery data\n")); tdb->ecode = TDB_ERR_OOM; return -1; } /* read the full recovery data */ if (tdb->methods->tdb_read(tdb, recovery_head + sizeof(rec), data, rec.data_len, 0) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to read recovery data\n")); tdb->ecode = TDB_ERR_IO; return -1; } /* recover the file data */ p = data; while (p+8 < data + rec.data_len) { uint32_t ofs, len; if (DOCONV()) { tdb_convert(p, 8); } memcpy(&ofs, p, 4); memcpy(&len, p+4, 4); if (tdb->methods->tdb_write(tdb, ofs, p+8, len) == -1) { free(data); TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to recover %u bytes at offset %u\n", len, ofs)); tdb->ecode = TDB_ERR_IO; return -1; } p += 8 + len; } free(data); if (transaction_sync(tdb, 0, tdb->map_size) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to sync recovery\n")); tdb->ecode = TDB_ERR_IO; return -1; } /* if the recovery area is after the recovered eof then remove it */ if (recovery_eof <= recovery_head) { if (tdb_ofs_write(tdb, TDB_RECOVERY_HEAD, &zero) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to remove recovery head\n")); tdb->ecode = TDB_ERR_IO; return -1; } } /* remove the recovery magic */ if (tdb_ofs_write(tdb, recovery_head + offsetof(struct tdb_record, magic), &zero) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to remove recovery magic\n")); tdb->ecode = TDB_ERR_IO; return -1; } if (transaction_sync(tdb, 0, recovery_eof) == -1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to sync2 recovery\n")); tdb->ecode = TDB_ERR_IO; return -1; } TDB_LOG((tdb, TDB_DEBUG_TRACE, "tdb_transaction_recover: recovered %u byte database\n", recovery_eof)); /* all done */ return 0; } /* Any I/O failures we say "needs recovery". 
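   Erring towards "needs recovery" is deliberate: if we cannot even read
   the recovery head, we must assume a crashed transaction could have
   left valid recovery data behind.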
*/ bool tdb_needs_recovery(struct tdb_context *tdb) { tdb_off_t recovery_head; struct tdb_record rec; /* find the recovery area */ if (tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, &recovery_head) == -1) { return true; } if (recovery_head == 0) { /* we have never allocated a recovery record */ return false; } /* read the recovery record */ if (tdb->methods->tdb_read(tdb, recovery_head, &rec, sizeof(rec), DOCONV()) == -1) { return true; } return (rec.magic == TDB_RECOVERY_MAGIC); } tdb-1.4.2/common/traverse.c0000660000000000000000000003312613527011454015521 0ustar rootroot00000000000000 /* Unix SMB/CIFS implementation. trivial database library Copyright (C) Andrew Tridgell 1999-2005 Copyright (C) Paul `Rusty' Russell 2000 Copyright (C) Jeremy Allison 2000-2003 ** NOTE! The following LGPL license applies to the tdb ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "tdb_private.h" #define TDB_NEXT_LOCK_ERR ((tdb_off_t)-1) /* Uses traverse lock: 0 = finish, TDB_NEXT_LOCK_ERR = error, other = record offset */ static tdb_off_t tdb_next_lock(struct tdb_context *tdb, struct tdb_traverse_lock *tlock, struct tdb_record *rec) { int want_next = (tlock->off != 0); /* Lock each chain from the start one. */ for (; tlock->list < tdb->hash_size; tlock->list++) { if (!tlock->off && tlock->list != 0) { /* this is an optimisation for the common case where the hash chain is empty, which is particularly common for the use of tdb with ldb, where large hashes are used. In that case we spend most of our time in tdb_brlock(), locking empty hash chains. To avoid this, we do an unlocked pre-check to see if the hash chain is empty before starting to look inside it. If it is empty then we can avoid that hash chain. If it isn't empty then we can't believe the value we get back, as we read it without a lock, so instead we get the lock and re-fetch the value below. Notice that not doing this optimisation on the first hash chain is critical. We must guarantee that we have done at least one fcntl lock at the start of a search to guarantee that memory is coherent on SMP systems. If records are added by others during the search then thats OK, and we could possibly miss those with this trick, but we could miss them anyway without this trick, so the semantics don't change. With a non-indexed ldb search this trick gains us a factor of around 80 in speed on a linux 2.6.x system (testing using ldbtest). */ tdb->methods->next_hash_chain(tdb, &tlock->list); if (tlock->list == tdb->hash_size) { continue; } } if (tdb_lock(tdb, tlock->list, tlock->lock_rw) == -1) return TDB_NEXT_LOCK_ERR; /* No previous record? Start at top of chain. */ if (!tlock->off) { if (tdb_ofs_read(tdb, TDB_HASH_TOP(tlock->list), &tlock->off) == -1) goto fail; } else { /* Otherwise unlock the previous record. 
*/ if (tdb_unlock_record(tdb, tlock->off) != 0) goto fail; } if (want_next) { /* We have offset of old record: grab next */ if (tdb_rec_read(tdb, tlock->off, rec) == -1) goto fail; tlock->off = rec->next; } /* Iterate through chain */ while( tlock->off) { if (tdb_rec_read(tdb, tlock->off, rec) == -1) goto fail; /* Detect infinite loops. From "Shlomi Yaakobovich" . */ if (tlock->off == rec->next) { tdb->ecode = TDB_ERR_CORRUPT; TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_next_lock: loop detected.\n")); goto fail; } if (!TDB_DEAD(rec)) { /* Woohoo: we found one! */ if (tdb_lock_record(tdb, tlock->off) != 0) goto fail; return tlock->off; } tlock->off = rec->next; } tdb_unlock(tdb, tlock->list, tlock->lock_rw); want_next = 0; } /* We finished iteration without finding anything */ tdb->ecode = TDB_SUCCESS; return 0; fail: tlock->off = 0; if (tdb_unlock(tdb, tlock->list, tlock->lock_rw) != 0) TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_next_lock: On error unlock failed!\n")); return TDB_NEXT_LOCK_ERR; } /* traverse the entire database - calling fn(tdb, key, data) on each element. return -1 on error or the record count traversed if fn is NULL then it is not called a non-zero return value from fn() indicates that the traversal should stop */ static int tdb_traverse_internal(struct tdb_context *tdb, tdb_traverse_func fn, void *private_data, struct tdb_traverse_lock *tl) { TDB_DATA key, dbuf; struct tdb_record rec; int ret = 0, count = 0; tdb_off_t off; size_t recbuf_len; recbuf_len = 4096; key.dptr = malloc(recbuf_len); if (key.dptr == NULL) { return -1; } /* This was in the initialization, above, but the IRIX compiler * did not like it. crh */ tl->next = tdb->travlocks.next; /* fcntl locks don't stack: beware traverse inside traverse */ tdb->travlocks.next = tl; /* tdb_next_lock places locks on the record returned, and its chain */ while ((off = tdb_next_lock(tdb, tl, &rec)) != 0) { tdb_len_t full_len; int nread; if (off == TDB_NEXT_LOCK_ERR) { ret = -1; goto out; } full_len = rec.key_len + rec.data_len; if (full_len > recbuf_len) { recbuf_len = full_len; /* * No realloc, we don't need the old data and thus can * do without the memcpy */ free(key.dptr); key.dptr = malloc(recbuf_len); if (key.dptr == NULL) { ret = -1; if (tdb_unlock(tdb, tl->list, tl->lock_rw) != 0) { goto out; } if (tdb_unlock_record(tdb, tl->off) != 0) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_traverse: malloc " "failed and unlock_record " "failed!\n")); } goto out; } } count++; /* now read the full record */ nread = tdb->methods->tdb_read(tdb, tl->off + sizeof(rec), key.dptr, full_len, 0); if (nread == -1) { ret = -1; if (tdb_unlock(tdb, tl->list, tl->lock_rw) != 0) goto out; if (tdb_unlock_record(tdb, tl->off) != 0) TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_traverse: key.dptr == NULL and unlock_record failed!\n")); goto out; } key.dsize = rec.key_len; dbuf.dptr = key.dptr + rec.key_len; dbuf.dsize = rec.data_len; tdb_trace_1rec_retrec(tdb, "traverse", key, dbuf); /* Drop chain lock, call out */ if (tdb_unlock(tdb, tl->list, tl->lock_rw) != 0) { ret = -1; goto out; } if (fn && fn(tdb, key, dbuf, private_data)) { /* They want us to terminate traversal */ tdb_trace_ret(tdb, "tdb_traverse_end", count); if (tdb_unlock_record(tdb, tl->off) != 0) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_traverse: unlock_record failed!\n"));; ret = -1; } goto out; } } tdb_trace(tdb, "tdb_traverse_end"); out: SAFE_FREE(key.dptr); tdb->travlocks.next = tl->next; if (ret < 0) return -1; else return count; } /* a read style traverse - temporarily marks each record read only */ 
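/* Usage sketch (hypothetical caller, not part of this file; the name
   count_fn is invented for illustration): count the records in a
   database without taking write locks.

	static int count_fn(struct tdb_context *tdb, TDB_DATA key,
			    TDB_DATA dbuf, void *private_data)
	{
		unsigned int *count = (unsigned int *)private_data;
		*count += 1;
		return 0;	// non-zero here would stop the traversal
	}

	unsigned int count = 0;
	int ret = tdb_traverse_read(tdb, count_fn, &count);
	// ret is the number of records visited, or -1 on error
*/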
_PUBLIC_ int tdb_traverse_read(struct tdb_context *tdb, tdb_traverse_func fn, void *private_data) { struct tdb_traverse_lock tl = { NULL, 0, 0, F_RDLCK }; int ret; tdb->traverse_read++; tdb_trace(tdb, "tdb_traverse_read_start"); ret = tdb_traverse_internal(tdb, fn, private_data, &tl); tdb->traverse_read--; return ret; } /* a write style traverse - needs to get the transaction lock to prevent deadlocks WARNING: The data buffer given to the callback fn does NOT meet the alignment guarantees malloc gives you. */ _PUBLIC_ int tdb_traverse(struct tdb_context *tdb, tdb_traverse_func fn, void *private_data) { struct tdb_traverse_lock tl = { NULL, 0, 0, F_WRLCK }; enum tdb_lock_flags lock_flags; int ret; if (tdb->read_only || tdb->traverse_read) { return tdb_traverse_read(tdb, fn, private_data); } lock_flags = TDB_LOCK_WAIT; if (tdb->allrecord_lock.count != 0) { /* * This avoids a deadlock between tdb_lockall() and * tdb_traverse(). See * https://bugzilla.samba.org/show_bug.cgi?id=11381 */ lock_flags = TDB_LOCK_NOWAIT; } if (tdb_transaction_lock(tdb, F_WRLCK, lock_flags)) { return -1; } tdb->traverse_write++; tdb_trace(tdb, "tdb_traverse_start"); ret = tdb_traverse_internal(tdb, fn, private_data, &tl); tdb->traverse_write--; tdb_transaction_unlock(tdb, F_WRLCK); return ret; } /* find the first entry in the database and return its key */ _PUBLIC_ TDB_DATA tdb_firstkey(struct tdb_context *tdb) { TDB_DATA key; struct tdb_record rec; tdb_off_t off; /* release any old lock */ if (tdb_unlock_record(tdb, tdb->travlocks.off) != 0) return tdb_null; tdb->travlocks.off = tdb->travlocks.list = 0; tdb->travlocks.lock_rw = F_RDLCK; /* Grab first record: locks chain and returned record. */ off = tdb_next_lock(tdb, &tdb->travlocks, &rec); if (off == 0 || off == TDB_NEXT_LOCK_ERR) { tdb_trace_retrec(tdb, "tdb_firstkey", tdb_null); return tdb_null; } /* now read the key */ key.dsize = rec.key_len; key.dptr =tdb_alloc_read(tdb,tdb->travlocks.off+sizeof(rec),key.dsize); tdb_trace_retrec(tdb, "tdb_firstkey", key); /* Unlock the hash chain of the record we just read. */ if (tdb_unlock(tdb, tdb->travlocks.list, tdb->travlocks.lock_rw) != 0) TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_firstkey: error occurred while tdb_unlocking!\n")); return key; } /* find the next entry in the database, returning its key */ _PUBLIC_ TDB_DATA tdb_nextkey(struct tdb_context *tdb, TDB_DATA oldkey) { uint32_t oldlist; TDB_DATA key = tdb_null; struct tdb_record rec; unsigned char *k = NULL; tdb_off_t off; /* Is locked key the old key? If so, traverse will be reliable. 
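   If it is not (the record has moved or been deleted since), we drop the
   stale traverse lock below and fall back to re-finding oldkey via its
   hash chain.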
*/ if (tdb->travlocks.off) { if (tdb_lock(tdb,tdb->travlocks.list,tdb->travlocks.lock_rw)) return tdb_null; if (tdb_rec_read(tdb, tdb->travlocks.off, &rec) == -1 || !(k = tdb_alloc_read(tdb,tdb->travlocks.off+sizeof(rec), rec.key_len)) || memcmp(k, oldkey.dptr, oldkey.dsize) != 0) { /* No, it wasn't: unlock it and start from scratch */ if (tdb_unlock_record(tdb, tdb->travlocks.off) != 0) { tdb_trace_1rec_retrec(tdb, "tdb_nextkey", oldkey, tdb_null); SAFE_FREE(k); return tdb_null; } if (tdb_unlock(tdb, tdb->travlocks.list, tdb->travlocks.lock_rw) != 0) { SAFE_FREE(k); return tdb_null; } tdb->travlocks.off = 0; } SAFE_FREE(k); } if (!tdb->travlocks.off) { /* No previous element: do normal find, and lock record */ tdb->travlocks.off = tdb_find_lock_hash(tdb, oldkey, tdb->hash_fn(&oldkey), tdb->travlocks.lock_rw, &rec); if (!tdb->travlocks.off) { tdb_trace_1rec_retrec(tdb, "tdb_nextkey", oldkey, tdb_null); return tdb_null; } tdb->travlocks.list = BUCKET(rec.full_hash); if (tdb_lock_record(tdb, tdb->travlocks.off) != 0) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_nextkey: lock_record failed (%s)!\n", strerror(errno))); return tdb_null; } } oldlist = tdb->travlocks.list; /* Grab next record: locks chain and returned record, unlocks old record */ off = tdb_next_lock(tdb, &tdb->travlocks, &rec); if (off != TDB_NEXT_LOCK_ERR && off != 0) { key.dsize = rec.key_len; key.dptr = tdb_alloc_read(tdb, tdb->travlocks.off+sizeof(rec), key.dsize); /* Unlock the chain of this new record */ if (tdb_unlock(tdb, tdb->travlocks.list, tdb->travlocks.lock_rw) != 0) TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_nextkey: WARNING tdb_unlock failed!\n")); } /* Unlock the chain of old record */ if (tdb_unlock(tdb, oldlist, tdb->travlocks.lock_rw) != 0) TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_nextkey: WARNING tdb_unlock failed!\n")); tdb_trace_1rec_retrec(tdb, "tdb_nextkey", oldkey, key); return key; } _PUBLIC_ int tdb_traverse_chain(struct tdb_context *tdb, unsigned chain, tdb_traverse_func fn, void *private_data) { tdb_off_t rec_ptr; struct tdb_chainwalk_ctx chainwalk; int count = 0; int ret; if (chain >= tdb->hash_size) { tdb->ecode = TDB_ERR_EINVAL; return -1; } if (tdb->traverse_read != 0) { tdb->ecode = TDB_ERR_LOCK; return -1; } ret = tdb_lock(tdb, chain, F_RDLCK); if (ret == -1) { return -1; } tdb->traverse_read += 1; ret = tdb_ofs_read(tdb, TDB_HASH_TOP(chain), &rec_ptr); if (ret == -1) { goto fail; } tdb_chainwalk_init(&chainwalk, rec_ptr); while (rec_ptr != 0) { struct tdb_record rec; bool ok; ret = tdb_rec_read(tdb, rec_ptr, &rec); if (ret == -1) { goto fail; } if (!TDB_DEAD(&rec)) { /* no overflow checks, tdb_rec_read checked it */ tdb_off_t key_ofs = rec_ptr + sizeof(rec); size_t full_len = rec.key_len + rec.data_len; uint8_t *buf = NULL; TDB_DATA key = { .dsize = rec.key_len }; TDB_DATA data = { .dsize = rec.data_len }; if ((tdb->transaction == NULL) && (tdb->map_ptr != NULL)) { ret = tdb_oob(tdb, key_ofs, full_len, 0); if (ret == -1) { goto fail; } key.dptr = (uint8_t *)tdb->map_ptr + key_ofs; } else { buf = tdb_alloc_read(tdb, key_ofs, full_len); if (buf == NULL) { goto fail; } key.dptr = buf; } data.dptr = key.dptr + key.dsize; ret = fn(tdb, key, data, private_data); free(buf); count += 1; if (ret != 0) { break; } } rec_ptr = rec.next; ok = tdb_chainwalk_check(tdb, &chainwalk, rec_ptr); if (!ok) { goto fail; } } tdb->traverse_read -= 1; tdb_unlock(tdb, chain, F_RDLCK); return count; fail: tdb->traverse_read -= 1; tdb_unlock(tdb, chain, F_RDLCK); return -1; } _PUBLIC_ int tdb_traverse_key_chain(struct tdb_context *tdb, 
TDB_DATA key, tdb_traverse_func fn, void *private_data) { uint32_t hash, chain; int ret; hash = tdb->hash_fn(&key); chain = BUCKET(hash); ret = tdb_traverse_chain(tdb, chain, fn, private_data); return ret; } tdb-1.4.2/configure0000770000000000000000000000066013444661620014141 0ustar rootroot00000000000000#!/bin/sh PREVPATH=`dirname $0` if [ -f $PREVPATH/../../buildtools/bin/waf ]; then WAF=../../buildtools/bin/waf elif [ -f $PREVPATH/buildtools/bin/waf ]; then WAF=./buildtools/bin/waf else echo "replace: Unable to find waf" exit 1 fi # using JOBS=1 gives maximum compatibility with # systems like AIX which have broken threading in python JOBS=1 export JOBS cd . || exit 1 $PYTHON $WAF configure "$@" || exit 1 cd $PREVPATH tdb-1.4.2/docs/README0000660000000000000000000002446013527011454014043 0ustar rootroot00000000000000tdb - a trivial database system tridge@linuxcare.com December 1999 ================================== This is a simple database API. It was inspired by the realisation that in Samba we have several ad-hoc bits of code that essentially implement small databases for sharing structures between parts of Samba. As I was about to add another I realised that a generic database module was called for to replace all the ad-hoc bits. I based the interface on gdbm. I couldn't use gdbm as we need to be able to have multiple writers to the databases at one time. Compilation ----------- add HAVE_MMAP=1 to use mmap instead of read/write add NOLOCK=1 to disable locking code Testing ------- Compile tdbtest.c and link with gdbm for testing. tdbtest will perform identical operations via tdb and gdbm then make sure the result is the same Also included is tdbtool, which allows simple database manipulation on the commandline. tdbtest and tdbtool are not built as part of Samba, but are included for completeness. Interface --------- The interface is very similar to gdbm except for the following: - different open interface. The tdb_open call is more similar to a traditional open() - no tdbm_reorganise() function - no tdbm_sync() function. No operations are cached in the library anyway - added a tdb_traverse() function for traversing the whole database - added transactions support A general rule for using tdb is that the caller frees any returned TDB_DATA structures. Just call free(p.dptr) to free a TDB_DATA return value called p. This is the same as gdbm. here is a full list of tdb functions with brief descriptions. ---------------------------------------------------------------------- TDB_CONTEXT *tdb_open(char *name, int hash_size, int tdb_flags, int open_flags, mode_t mode) open the database, creating it if necessary The open_flags and mode are passed straight to the open call on the database file. A flags value of O_WRONLY is invalid The hash size is advisory, use zero for a default value. return is NULL on error possible tdb_flags are: TDB_CLEAR_IF_FIRST - clear database if we are the only one with it open TDB_INTERNAL - don't use a file, instead store the data in memory. The filename is ignored in this case. 
TDB_NOLOCK - don't do any locking
TDB_NOMMAP - don't use mmap
TDB_NOSYNC - don't synchronise transactions to disk
TDB_SEQNUM - maintain a sequence number
TDB_VOLATILE - activate the per-hashchain freelist, default 5
TDB_ALLOW_NESTING - allow transactions to nest
TDB_DISALLOW_NESTING - disallow transactions to nest

----------------------------------------------------------------------
TDB_CONTEXT *tdb_open_ex(char *name, int hash_size, int tdb_flags,
                         int open_flags, mode_t mode,
                         const struct tdb_logging_context *log_ctx,
                         tdb_hash_func hash_fn)

This is like tdb_open(), but allows you to pass an initial logging and
hash function. Be careful when passing a hash function - all users of
the database must use the same hash function or you will get data
corruption.

----------------------------------------------------------------------
char *tdb_error(TDB_CONTEXT *tdb);

return an error string for the last tdb error

----------------------------------------------------------------------
int tdb_close(TDB_CONTEXT *tdb);

close a database

----------------------------------------------------------------------
TDB_DATA tdb_fetch(TDB_CONTEXT *tdb, TDB_DATA key);

fetch an entry in the database given a key
if the return value has a null dptr then an error occurred
caller must free the resulting data

----------------------------------------------------------------------
int tdb_parse_record(struct tdb_context *tdb, TDB_DATA key,
                     int (*parser)(TDB_DATA key, TDB_DATA data,
                                   void *private_data),
                     void *private_data);

Hand a record to a parser function without allocating it. This function
is meant as a fast tdb_fetch alternative for large records that are
frequently read. The "key" and "data" arguments point directly into the
tdb shared memory, they are not aligned at any boundary.

WARNING: The parser is called while tdb holds a lock on the record. DO
NOT call other tdb routines from within the parser. Also, for good
performance you should make the parser fast to allow parallel
operations.

tdb_parse_record returns -1 if the record was not found. If the record
was found, the return value of "parser" is passed up to the caller.

----------------------------------------------------------------------
int tdb_exists(TDB_CONTEXT *tdb, TDB_DATA key);

check if an entry in the database exists
note that 1 is returned if the key is found and 0 is returned if not found
this doesn't match the conventions in the rest of this module, but is
compatible with gdbm

----------------------------------------------------------------------
int tdb_traverse(TDB_CONTEXT *tdb,
                 int (*fn)(TDB_CONTEXT *tdb, TDB_DATA key, TDB_DATA dbuf,
                           void *state),
                 void *state);

traverse the entire database - calling fn(tdb, key, data, state) on
each element.
return -1 on error or the record count traversed
if fn is NULL then it is not called
a non-zero return value from fn() indicates that the traversal should
stop. Traversal callbacks may not start transactions.

WARNING: The data buffer given to the callback fn does NOT meet the
alignment restrictions malloc gives you.

----------------------------------------------------------------------
int tdb_traverse_read(TDB_CONTEXT *tdb,
                      int (*fn)(TDB_CONTEXT *tdb, TDB_DATA key,
                                TDB_DATA dbuf, void *state),
                      void *state);

traverse the entire database - calling fn(tdb, key, data, state) on
each element, but marking the database read only during the traversal,
so any write operations will fail. This allows tdb to use read locks,
which increases the parallelism possible during the traversal.
return -1 on error or the record count traversed if fn is NULL then it is not called a non-zero return value from fn() indicates that the traversal should stop. Traversal callbacks may not start transactions. ---------------------------------------------------------------------- TDB_DATA tdb_firstkey(TDB_CONTEXT *tdb); find the first entry in the database and return its key the caller must free the returned data ---------------------------------------------------------------------- TDB_DATA tdb_nextkey(TDB_CONTEXT *tdb, TDB_DATA key); find the next entry in the database, returning its key the caller must free the returned data ---------------------------------------------------------------------- int tdb_delete(TDB_CONTEXT *tdb, TDB_DATA key); delete an entry in the database given a key ---------------------------------------------------------------------- int tdb_store(TDB_CONTEXT *tdb, TDB_DATA key, TDB_DATA dbuf, int flag); store an element in the database, replacing any existing element with the same key If flag==TDB_INSERT then don't overwrite an existing entry If flag==TDB_MODIFY then don't create a new entry return 0 on success, -1 on failure ---------------------------------------------------------------------- int tdb_writelock(TDB_CONTEXT *tdb); lock the database. If we already have it locked then don't do anything ---------------------------------------------------------------------- int tdb_writeunlock(TDB_CONTEXT *tdb); unlock the database ---------------------------------------------------------------------- int tdb_chainlock(TDB_CONTEXT *tdb, TDB_DATA key); lock one hash chain. This is meant to be used to reduce locking contention - it cannot guarantee how many records will be locked ---------------------------------------------------------------------- int tdb_chainunlock(TDB_CONTEXT *tdb, TDB_DATA key); unlock one hash chain ---------------------------------------------------------------------- int tdb_transaction_start(TDB_CONTEXT *tdb) start a transaction. All operations after the transaction start can either be committed with tdb_transaction_commit() or cancelled with tdb_transaction_cancel(). If you call tdb_transaction_start() again on the same tdb context while a transaction is in progress, then the same transaction buffer is re-used. The number of tdb_transaction_{commit,cancel} operations must match the number of successful tdb_transaction_start() calls. Note that transactions are by default disk synchronous, and use a recover area in the database to automatically recover the database on the next open if the system crashes during a transaction. You can disable the synchronous transaction recovery setup using the TDB_NOSYNC flag, which will greatly speed up operations at the risk of corrupting your database if the system crashes. Operations made within a transaction are not visible to other users of the database until a successful commit. ---------------------------------------------------------------------- int tdb_transaction_cancel(TDB_CONTEXT *tdb) cancel a current transaction, discarding all write and lock operations that have been made since the transaction started. ---------------------------------------------------------------------- int tdb_transaction_commit(TDB_CONTEXT *tdb) commit a current transaction, updating the database and releasing the transaction locks. ---------------------------------------------------------------------- int tdb_transaction_prepare_commit(TDB_CONTEXT *tdb) prepare to commit a current transaction, for two-phase commits. 
Once prepared for commit, the only allowed calls are
tdb_transaction_commit() or tdb_transaction_cancel(). Preparing
allocates disk space for the pending updates, so a subsequent commit
should succeed (barring any hardware failures).

----------------------------------------------------------------------
int tdb_check(TDB_CONTEXT *tdb,
              int (*check)(TDB_DATA key, TDB_DATA data, void *private_data),
              void *private_data);

check the consistency of the database, calling back the check function
(if non-NULL) with each record. If some consistency check fails, or the
supplied check function returns -1, tdb_check returns -1, otherwise 0.
Note that the logging function (if set) will be called with additional
information on the corruption found.
tdb-1.4.2/docs/mainpage.dox0000660000000000000000000000362712406075657015474 0ustar rootroot00000000000000/**

@mainpage

This is a simple database API. It was inspired by the realisation that
in Samba we have several ad-hoc bits of code that essentially implement
small databases for sharing structures between parts of Samba.

The interface is based on gdbm. gdbm couldn't be used as we needed to be
able to have multiple writers to the databases at one time.

@section tdb_download Download

You can download the latest releases of tdb from the tdb directory on
the samba public source archive.

You can download the latest code either via git or rsync.

To fetch via git see the following guide: Using Git for Samba
Development. Once you have cloned the tree switch to the master branch
and cd into the source/lib/tdb directory.

To fetch via rsync use these commands:
  rsync -Pavz samba.org::ftp/unpacked/standalone_projects/lib/tdb .
  rsync -Pavz samba.org::ftp/unpacked/standalone_projects/lib/replace .
and build in tdb. It will find the replace library in the directory
above automatically.

@section tdb_bugs Discussion and bug reports

tdb does not currently have its own mailing list or bug tracking
system. For now, please use the samba-technical mailing list, and the
Samba bugzilla bug tracking system.

@section tdb_compilation Compilation

add HAVE_MMAP=1 to use mmap instead of read/write
add NOLOCK=1 to disable locking code

@section tdb_testing Testing

Compile tdbtest.c and link with gdbm for testing. tdbtest will perform
identical operations via tdb and gdbm and then make sure the results
are the same.

Also included is tdbtool, which allows simple database manipulation on
the commandline.

tdbtest and tdbtool are not built as part of Samba, but are included
for completeness.

*/
tdb-1.4.2/docs/mutex.txt0000660000000000000000000001630312632255624015070 0ustar rootroot00000000000000Tdb is a hashtable database with support for multiple concurrent writers and external record locks. For speed reasons, wherever possible tdb uses a shared memory mapped area for data access. In its currently released form, it uses fcntl byte-range locks to coordinate access to the data itself.

The tdb data is organized as a hashtable. Hash collisions are dealt with by forming a linked list of records that share a hash value. The individual linked lists are protected across processes with 1-byte fcntl locks on the starting pointer of the linked list representing a hash value.

The external locking API of tdb allows one to lock individual records. Instead of really locking individual records, the tdb API locks a complete linked list with a fcntl lock.

The external locking API of tdb also allows one to lock the complete database, and ctdb uses this facility to freeze databases during a recovery. While the so-called allrecord lock is held, all linked lists and all individual records are frozen altogether. Tdb achieves this by locking the complete file range with a single fcntl lock. Individual 1-byte locks for the linked lists conflict with this. Access to records is prevented by the one large fcntl byte-range lock.

Fcntl locks have been chosen for tdb for two reasons: First, they are portable across all current unixes. Secondly, they provide auto-cleanup. If a process dies while holding a fcntl lock, the lock is given up as if it was explicitly unlocked. Thus fcntl locks provide a very robust locking scheme: if a process dies for any reason, the database will not stay blocked until reboot. This robustness is very important for long-running services; a reboot is not an option for most users of tdb.

Unfortunately, during stress testing, fcntl locks have turned out to be a major problem for performance. The particular problem that was seen happens when ctdb on a busy server does a recovery. A recovery means that ctdb has to freeze all tdb databases for some time, usually a few seconds. This is done with the allrecord lock. During the recovery phase on a busy server, many smbd processes try to access the tdb file with blocking fcntl calls. The specific test in question easily reproduces 7,000 processes piling up waiting for 1-byte fcntl locks. When ctdb is done with the recovery, it gives up the allrecord lock covering the whole file range. All 7,000 processes waiting for 1-byte fcntl locks are woken up, trying to acquire their lock. The special implementation of fcntl locks in Linux (up to 2013-02-12 at least) protects all fcntl lock operations with a single system-wide spinlock.
If 7,000 processes are waiting for the allrecord lock to be released, this leads to a thundering herd condition: all CPUs are spinning on that single spinlock. Functionally the kernel is fine, eventually the thundering herd slows down and every process correctly gets its share and locking range, but the performance of the system while the herd is active is worse than expected.

The thundering herd is only the worst-case scenario for fcntl lock use. The single spinlock for fcntl operations is also a performance penalty for normal operations. In the cluster case, every read and write SMB request has to do two fcntl calls to provide correct SMB mandatory locks. The single spinlock is one source of serialization for the SMB read/write requests, limiting the parallelism that can be achieved in a multi-core system.

While trying to tune his servers, Ira Cooper, Samba Team member, found fcntl locks to be a problem on Solaris as well. Ira pointed out that there is a potential alternative locking mechanism that might be more scalable: process-shared robust mutexes, as defined by Posix 2008, for example via

http://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutexattr_setpshared.html
http://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutexattr_setrobust.html

Pthread mutexes provide one of the core mechanisms in posix threads to protect in-process data structures from concurrent access by multiple threads. In the Linux implementation, a pthread_mutex_t is represented by a data structure in user space that requires no kernel calls in the uncontended case for locking and unlocking. Locking and unlocking in the uncontended case is implemented purely in user space with atomic CPU instructions and is thus very fast.

The setpshared functions indicate to the kernel that the mutex is about to be shared between processes in a common shared memory area.

The process-shared posix mutexes have the potential to replace fcntl locking to coordinate mmap access for tdbs. However, they are missing the critical auto-cleanup property that fcntl provides when a process dies. A process that dies hard while holding a shared mutex has no chance to clean up the protected data structures and unlock the shared mutex. Thus with a pure process-shared mutex, the mutex will remain locked forever until the data structures are re-initialized from scratch.

With the robust mutexes defined by Posix, the process-shared mutexes have been extended with a limited auto-cleanup property. If a mutex has been declared robust, when a process exits while holding that mutex, the next process trying to lock the mutex will get the special error message EOWNERDEAD. This informs the caller that the data structures the mutex protects are potentially corrupt and need to be cleaned up.

The error message EOWNERDEAD when trying to lock a mutex is an extension over the fcntl functionality. A process that does a blocking fcntl lock call is not informed about whether the lock was explicitly freed by a process still alive or was released due to an unplanned process exit. At the time of this writing (February 2013), at least Linux and OpenSolaris implement the robustness feature of process-shared mutexes.

Converting the tdb locking mechanism from fcntl to mutexes has to take care of both types of locks that are used on tdb files. The easy part is to use mutexes to replace the 1-byte linked-list locks covering the individual hashes; those can be represented by a mutex each, as the sketch below illustrates.
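As an illustration only (a minimal sketch, not the actual tdb implementation; the helper name init_chain_mutex is invented), one robust process-shared mutex per hash chain could be set up like this:

  #include <pthread.h>

  static int init_chain_mutex(pthread_mutex_t *m)
  {
          pthread_mutexattr_t attr;
          int ret;

          ret = pthread_mutexattr_init(&attr);
          if (ret != 0) {
                  return ret;
          }
          /* the mutex lives in a file-backed shared mapping */
          pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
          /* a later locker gets EOWNERDEAD if the holder died */
          pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
          ret = pthread_mutex_init(m, &attr);
          pthread_mutexattr_destroy(&attr);
          return ret;
  }

A locker that receives EOWNERDEAD would repair the chain the mutex protects and call pthread_mutex_consistent() before continuing.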
Covering the allrecord lock is more difficult. The allrecord lock uses a fcntl lock spanning all hash-list locks simultaneously. This basic functionality is not easily possible with mutexes: a mutex carries 1 bit of information, while a fcntl lock can carry an arbitrary amount of information.

In order to support the allrecord lock, we have an allrecord_lock variable protected by an allrecord_mutex. The coordination between the allrecord lock and the chainlocks works like this:

- Getting a chain lock works like this:

  1. get chain mutex
  2. return success if allrecord_lock is F_UNLCK (not locked)
  3. return success if allrecord_lock is F_RDLCK (locked readonly)
     and we only need a read lock.
  4. release chain mutex
  5. wait for allrecord_mutex
  6. unlock allrecord_mutex
  7. goto 1.

- Getting the allrecord lock:

  1. get the allrecord mutex
  2. return error if allrecord_lock is not F_UNLCK (it's locked)
  3. set allrecord_lock to the desired value.
  4. in a loop: lock(blocking) / unlock each chain mutex.
  5. return success.

- allrecord lock upgrade:

  1. check we already have the allrecord lock with F_RDLCK.
  2. set allrecord_lock to F_WRLCK
  3. in a loop: lock(blocking) / unlock each chain mutex.
  4. return success.
tdb-1.4.2/docs/tdb.magic0000660000000000000000000000053112406075657014741 0ustar rootroot00000000000000# Magic file(1) information about tdb files.
#
# Install this into /etc/magic or the corresponding location for your
# system, or pass as a -m argument to file(1).
# You may use and redistribute this file without restriction.

0	string	TDB\ file	TDB database
>32	lelong	=0x2601196D	version 6, little-endian
>>36	lelong	x	hash size %d bytes
tdb-1.4.2/docs/tracing.txt0000660000000000000000000000356412406075657015363 0ustar rootroot00000000000000How And Why To Use TDB Tracing
==============================

You can trace all TDB operations, using TDB_TRACE. It is not complete (error conditions which you expect to be logged will not always be traced correctly, so you should set up a logging function too), but is designed to collect benchmark-style traces to allow us to optimize TDB.

Note: tracing is not efficient, and the trace files are huge: a traverse of the database is particularly large! But they compress very well with rzip (http://rzip.samba.org)

How to gather trace files:
--------------------------

1) Uncomment /* #define TDB_TRACE 1 */ in tdb_private.h.
2) Rebuild TDB, and everything that uses it.
3) Run something.

Your trace files will be called <tdbname>.trace.<pid>. These files will not be overwritten: if the same process reopens the same TDB, an error will be logged and tracing will be disabled.

How to replay trace files:
--------------------------

1) For benchmarking, remember to rebuild tdb with #define TDB_TRACE commented out again!
2) Grab the latest "replay_trace.c" from CCAN's tdb module (tools/ dir): http://ccan.ozlabs.org/tarballs/tdb.tar.bz2
3) Compile up replay_trace, munging as necessary.
4) Run replay_trace <tracefiles>...

If given more than one trace file (presumably from the same tdb), replay_trace will try to figure out the dependencies between the operations and fire off a child to run each trace. Occasionally it gets stuck, in which case it will add another dependency and retry. Eventually it will give a speed value.

replay_trace can intuit the existence of previous data in the tdb (ie. activity prior to the trace(s) supplied) and will prepopulate as necessary.

You can run --quiet for straight benchmark results, and -n to run multiple times (this saves time, since it need only calculate dependencies once).

Good luck!
Rusty Russell tdb-1.4.2/doxy.config0000660000000000000000000021316112406075657014415 0ustar rootroot00000000000000# Doxyfile 1.7.3 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" "). #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded # by quotes) that should identify the project. PROJECT_NAME = tdb # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = 1.2.9 # Using the PROJECT_BRIEF tag one can provide an optional one line description for a project that appears at the top of each page and should give viewer a quick idea about the purpose of the project. Keep the description short. PROJECT_BRIEF = # With the PROJECT_LOGO tag one can specify an logo or icon that is # included in the documentation. The maximum height of the logo should not # exceed 55 pixels and the maximum width should not exceed 200 pixels. # Doxygen will copy the logo to the output directory. PROJECT_LOGO = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = docs # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, # Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English # messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, # Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, # Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. 
OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = "The $name class" \ "The $name widget" \ "The $name file" \ is \ provides \ specifies \ contains \ represents \ a \ an \ the # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful if your file system # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) 
JAVADOC_AUTOBRIEF = YES # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = YES # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given extension. # Doxygen has a built-in mapping, but you can override or extend it using this # tag. The format is ext=language, where ext is a file extension, and language # is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C, # C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make # doxygen treat .inc files as Fortran files (default is PHP), and .f files as C # (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions # you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. EXTENSION_MAPPING = # If you use STL classes (i.e. std::string, std::vector, etc.) 
but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also makes the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate getter # and setter methods for a property. Setting this option to YES (the default) # will make doxygen replace the get and set methods by a property in the # documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # The SYMBOL_CACHE_SIZE determines the size of the internal cache use to # determine which symbols to keep in memory and which to flush to disk. # When the cache is full, less often used symbols will be written to disk. # For small to medium size projects (<1000 input files) the default value is # probably good enough. For larger projects a too small cache size can cause # doxygen to be busy swapping symbols to and from disk most of the time # causing a significant performance penalty. # If the system has enough physical memory increasing the cache will improve the # performance by keeping more symbols in memory. Note that the value works on # a logarithmic scale so increasing the size by one will roughly double the # memory usage. The cache size is given by this formula: # 2^(16+SYMBOL_CACHE_SIZE). 
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------

# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
# documentation are documented, even if no documentation was available.
# Private class members and static file members will be hidden unless
# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES.

EXTRACT_ALL = NO

# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
# will be included in the documentation.

EXTRACT_PRIVATE = NO

# If the EXTRACT_STATIC tag is set to YES all static members of a file
# will be included in the documentation.

EXTRACT_STATIC = NO

# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
# defined locally in source files will be included in the documentation.
# If set to NO only classes defined in header files are included.

EXTRACT_LOCAL_CLASSES = NO

# This flag is only useful for Objective-C code. When set to YES local
# methods, which are defined in the implementation section but not in
# the interface, are included in the documentation.
# If set to NO (the default) only methods in the interface are included.

EXTRACT_LOCAL_METHODS = NO

# If this flag is set to YES, the members of anonymous namespaces will be
# extracted and appear in the documentation as a namespace called
# 'anonymous_namespace{file}', where file will be replaced with the base
# name of the file that contains the anonymous namespace. By default
# anonymous namespaces are hidden.

EXTRACT_ANON_NSPACES = NO

# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
# undocumented members of documented classes, files or namespaces.
# If set to NO (the default) these members will be included in the
# various overviews, but no documentation section is generated.
# This option has no effect if EXTRACT_ALL is enabled.

HIDE_UNDOC_MEMBERS = YES

# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
# undocumented classes that are normally visible in the class hierarchy.
# If set to NO (the default) these classes will be included in the various
# overviews. This option has no effect if EXTRACT_ALL is enabled.

HIDE_UNDOC_CLASSES = YES

# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
# friend (class|struct|union) declarations.
# If set to NO (the default) these declarations will be included in the
# documentation.

HIDE_FRIEND_COMPOUNDS = NO

# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
# documentation blocks found inside the body of a function.
# If set to NO (the default) these blocks will be appended to the
# function's detailed documentation block.

HIDE_IN_BODY_DOCS = NO

# The INTERNAL_DOCS tag determines if documentation
# that is typed after a \internal command is included. If the tag is set
# to NO (the default) then the documentation will be excluded.
# Set it to YES to include the internal documentation.

INTERNAL_DOCS = NO

# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
# file names in lower-case letters. If set to YES upper-case letters are also
# allowed. This is useful if you have classes or files whose names only differ
# in case and if your file system supports case sensitive file names. Windows
# and Mac users are advised to set this option to NO.
CASE_SENSE_NAMES = YES

# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
# will show members with their full class and namespace scopes in the
# documentation. If set to YES the scope will be hidden.

HIDE_SCOPE_NAMES = NO

# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
# will put a list of the files that are included by a file in the
# documentation of that file.

SHOW_INCLUDE_FILES = YES

# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
# will list include files with double quotes in the documentation
# rather than with sharp brackets.

FORCE_LOCAL_INCLUDES = NO

# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
# is inserted in the documentation for inline members.

INLINE_INFO = YES

# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
# will sort the (detailed) documentation of file and class members
# alphabetically by member name. If set to NO the members will appear in
# declaration order.

SORT_MEMBER_DOCS = YES

# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
# brief documentation of file, namespace and class members alphabetically
# by member name. If set to NO (the default) the members will appear in
# declaration order.

SORT_BRIEF_DOCS = NO

# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
# will sort the (brief and detailed) documentation of class members so that
# constructors and destructors are listed first. If set to NO (the default)
# the constructors will appear in the respective orders defined by
# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.

SORT_MEMBERS_CTORS_1ST = NO

# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
# hierarchy of group names into alphabetical order. If set to NO (the default)
# the group names will appear in their defined order.

SORT_GROUP_NAMES = NO

# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
# sorted by fully-qualified names, including namespaces. If set to
# NO (the default), the class list will be sorted only by class name,
# not including the namespace part.
# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
# Note: This option applies only to the class list, not to the
# alphabetical list.

SORT_BY_SCOPE_NAME = NO

# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do
# proper type resolution of all parameters of a function it will reject a
# match between the prototype and the implementation of a member function
# even if there is only one candidate or it is obvious which candidate to
# choose by doing a simple string match. By disabling STRICT_PROTO_MATCHING
# doxygen will still accept a match between prototype and implementation in
# such cases.

STRICT_PROTO_MATCHING = NO

# The GENERATE_TODOLIST tag can be used to enable (YES) or
# disable (NO) the todo list. This list is created by putting \todo
# commands in the documentation.

GENERATE_TODOLIST = YES

# The GENERATE_TESTLIST tag can be used to enable (YES) or
# disable (NO) the test list. This list is created by putting \test
# commands in the documentation.

GENERATE_TESTLIST = YES

# The GENERATE_BUGLIST tag can be used to enable (YES) or
# disable (NO) the bug list. This list is created by putting \bug
# commands in the documentation.

GENERATE_BUGLIST = YES

# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
# disable (NO) the deprecated list.
# This list is created by putting
# \deprecated commands in the documentation.

GENERATE_DEPRECATEDLIST= YES

# The ENABLED_SECTIONS tag can be used to enable conditional
# documentation sections, marked by \if sectionname ... \endif.

ENABLED_SECTIONS =

# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
# the initial value of a variable or macro consists of for it to appear in
# the documentation. If the initializer consists of more lines than specified
# here it will be hidden. Use a value of 0 to hide initializers completely.
# The appearance of the initializer of individual variables and macros in the
# documentation can be controlled using \showinitializer or \hideinitializer
# command in the documentation regardless of this setting.

MAX_INITIALIZER_LINES = 30

# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
# at the bottom of the documentation of classes and structs. If set to YES the
# list will mention the files that were used to generate the documentation.

SHOW_USED_FILES = YES

# If the sources in your project are distributed over multiple directories
# then setting the SHOW_DIRECTORIES tag to YES will show the directory
# hierarchy in the documentation. The default is NO.

SHOW_DIRECTORIES = NO

# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
# This will remove the Files entry from the Quick Index and from the
# Folder Tree View (if specified). The default is YES.

SHOW_FILES = YES

# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
# Namespaces page.
# This will remove the Namespaces entry from the Quick Index
# and from the Folder Tree View (if specified). The default is YES.

SHOW_NAMESPACES = YES

# The FILE_VERSION_FILTER tag can be used to specify a program or script that
# doxygen should invoke to get the current version for each file (typically
# from the version control system). Doxygen will invoke the program by
# executing (via popen()) the command <command> <input-file>, where <command>
# is the value of the FILE_VERSION_FILTER tag, and <input-file> is the name
# of an input file provided by doxygen. Whatever the program writes to
# standard output is used as the file version. See the manual for examples.

FILE_VERSION_FILTER =

# The LAYOUT_FILE tag can be used to specify a layout file which will be
# parsed by doxygen. The layout file controls the global structure of the
# generated output files in an output format independent way. To create the
# layout file that represents doxygen's defaults, run doxygen with the -l
# option. You can optionally specify a file name after the option, if omitted
# DoxygenLayout.xml will be used as the name of the layout file.

LAYOUT_FILE =

#---------------------------------------------------------------------------
# configuration options related to warning and progress messages
#---------------------------------------------------------------------------

# The QUIET tag can be used to turn on/off the messages that are generated
# by doxygen. Possible values are YES and NO. If left blank NO is used.

QUIET = NO

# The WARNINGS tag can be used to turn on/off the warning messages that are
# generated by doxygen. Possible values are YES and NO. If left blank
# NO is used.

WARNINGS = YES

# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
# automatically be disabled.

WARN_IF_UNDOCUMENTED = YES
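# As a sketch of the comment style these warnings (and the parameter checks
# described next) reward — the function below is hypothetical, not from tdb:
#
#   /**
#    * @brief Compare two keys byte-wise.
#    *
#    * @param[in] a  The first key.
#    * @param[in] b  The second key.
#    *
#    * @return 0 if the keys are equal, non-zero otherwise.
#    */
#   int example_key_cmp(TDB_DATA a, TDB_DATA b);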
# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
# potential errors in the documentation, such as not documenting some
# parameters in a documented function, or documenting parameters that
# don't exist or using markup commands wrongly.

WARN_IF_DOC_ERROR = YES

# The WARN_NO_PARAMDOC option can be enabled to get warnings for
# functions that are documented, but have no documentation for their
# parameters or return value. If set to NO (the default) doxygen will only
# warn about wrong or incomplete parameter documentation, but not about the
# absence of documentation.

WARN_NO_PARAMDOC = NO

# The WARN_FORMAT tag determines the format of the warning messages that
# doxygen can produce. The string should contain the $file, $line, and $text
# tags, which will be replaced by the file and line number from which the
# warning originated and the warning text. Optionally the format may contain
# $version, which will be replaced by the version of the file (if it could
# be obtained via FILE_VERSION_FILTER)

WARN_FORMAT = "$file:$line: $text"

# The WARN_LOGFILE tag can be used to specify a file to which warning
# and error messages should be written. If left blank the output is written
# to stderr.

WARN_LOGFILE =

#---------------------------------------------------------------------------
# configuration options related to the input files
#---------------------------------------------------------------------------

# The INPUT tag can be used to specify the files and/or directories that
# contain documented source files. You may enter file names like "myfile.cpp"
# or directories like "/usr/src/myproject". Separate the files or directories
# with spaces.

INPUT = include \
        docs

# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
# also the default input encoding. Doxygen uses libiconv (or the iconv built
# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
# the list of possible encodings.

INPUT_ENCODING = UTF-8

# If the value of the INPUT tag contains directories, you can use the
# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
# and *.h) to filter out the source-files in the directories. If left
# blank the following patterns are tested:
# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
# *.f90 *.f *.for *.vhd *.vhdl

FILE_PATTERNS = *.cpp \
                *.cc \
                *.c \
                *.h \
                *.hh \
                *.hpp \
                *.dox

# The RECURSIVE tag can be used to specify whether or not subdirectories
# should be searched for input files as well. Possible values are YES and NO.
# If left blank NO is used.

RECURSIVE = NO

# The EXCLUDE tag can be used to specify files and/or directories that should
# be excluded from the INPUT source files. This way you can easily exclude a
# subdirectory from a directory tree whose root is specified with the INPUT
# tag.

EXCLUDE =

# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are
# excluded from the input.

EXCLUDE_SYMLINKS = NO

# If the value of the INPUT tag contains directories, you can use the
# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
# certain files from those directories.
# Note that the wildcards are matched
# against the file with absolute path, so to exclude all test directories
# for example use the pattern */test/*

EXCLUDE_PATTERNS = */.git/*

# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the
# output. The symbol name can be a fully qualified name, a word, or if the
# wildcard * is used, a substring. Examples: ANamespace, AClass,
# AClass::ANamespace, ANamespace::*Test

EXCLUDE_SYMBOLS =

# The EXAMPLE_PATH tag can be used to specify one or more files or
# directories that contain example code fragments that are included (see
# the \include command).

EXAMPLE_PATH =

# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
# and *.h) to filter out the source-files in the directories. If left
# blank all files are included.

EXAMPLE_PATTERNS =

# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
# searched for input files to be used with the \include or \dontinclude
# commands irrespective of the value of the RECURSIVE tag.
# Possible values are YES and NO. If left blank NO is used.

EXAMPLE_RECURSIVE = NO

# The IMAGE_PATH tag can be used to specify one or more files or
# directories that contain images that are included in the documentation (see
# the \image command).

IMAGE_PATH =

# The INPUT_FILTER tag can be used to specify a program that doxygen should
# invoke to filter for each input file. Doxygen will invoke the filter program
# by executing (via popen()) the command <filter> <input-file>, where <filter>
# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
# input file. Doxygen will then use the output that the filter program writes
# to standard output.
# If FILTER_PATTERNS is specified, this tag will be ignored.

INPUT_FILTER =

# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
# basis.
# Doxygen will compare the file name with each pattern and apply the
# filter if there is a match.
# The filters are a list of the form:
# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
# info on how filters are used. If FILTER_PATTERNS is empty or if
# none of the patterns match the file name, INPUT_FILTER is applied.

FILTER_PATTERNS =

# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
# INPUT_FILTER) will be used to filter the input files when producing source
# files to browse (i.e. when SOURCE_BROWSER is set to YES).

FILTER_SOURCE_FILES = NO

# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per
# file pattern. A pattern will override the setting for FILTER_PATTERNS (if
# any) and it is also possible to disable source filtering for a specific
# pattern using *.ext= (so without naming a filter). This option only has
# effect when FILTER_SOURCE_FILES is enabled.

FILTER_SOURCE_PATTERNS =

#---------------------------------------------------------------------------
# configuration options related to source browsing
#---------------------------------------------------------------------------

# If the SOURCE_BROWSER tag is set to YES then a list of source files will
# be generated. Documented entities will be cross-referenced with these
# sources.
# Note: To get rid of all source code in the generated output, make sure also
# VERBATIM_HEADERS is set to NO.
SOURCE_BROWSER = NO

# Setting the INLINE_SOURCES tag to YES will include the body
# of functions and classes directly in the documentation.

INLINE_SOURCES = NO

# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
# doxygen to hide any special comment blocks from generated source code
# fragments. Normal C and C++ comments will always remain visible.

STRIP_CODE_COMMENTS = YES

# If the REFERENCED_BY_RELATION tag is set to YES
# then for each documented function all documented
# functions referencing it will be listed.

REFERENCED_BY_RELATION = NO

# If the REFERENCES_RELATION tag is set to YES
# then for each documented function all documented entities
# called/used by that function will be listed.

REFERENCES_RELATION = NO

# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
# link to the source code.
# Otherwise they will link to the documentation.

REFERENCES_LINK_SOURCE = YES

# If the USE_HTAGS tag is set to YES then the references to source code
# will point to the HTML generated by the htags(1) tool instead of doxygen's
# built-in source browser. The htags tool is part of GNU's global source
# tagging system (see http://www.gnu.org/software/global/global.html). You
# will need version 4.8.6 or higher.

USE_HTAGS = NO

# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
# will generate a verbatim copy of the header file for each class for
# which an include is specified. Set to NO to disable this.

VERBATIM_HEADERS = YES

#---------------------------------------------------------------------------
# configuration options related to the alphabetical class index
#---------------------------------------------------------------------------

# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
# of all compounds will be generated. Enable this if the project
# contains a lot of classes, structs, unions or interfaces.

ALPHABETICAL_INDEX = NO

# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
# in which this list will be split (can be a number in the range [1..20])

COLS_IN_ALPHA_INDEX = 5

# In case all classes in a project start with a common prefix, all
# classes will be put under the same header in the alphabetical index.
# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
# should be ignored while generating the index headers.

IGNORE_PREFIX =

#---------------------------------------------------------------------------
# configuration options related to the HTML output
#---------------------------------------------------------------------------

# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
# generate HTML output.

GENERATE_HTML = YES

# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `html' will be used as the default path.

HTML_OUTPUT = html

# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
# doxygen will generate files with .html extension.

HTML_FILE_EXTENSION = .html

# The HTML_HEADER tag can be used to specify a personal HTML header for
# each generated HTML page. If it is left blank doxygen will generate a
# standard header.
HTML_HEADER =

# The HTML_FOOTER tag can be used to specify a personal HTML footer for
# each generated HTML page. If it is left blank doxygen will generate a
# standard footer.

HTML_FOOTER =

# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
# style sheet that is used by each HTML page. It can be used to
# fine-tune the look of the HTML output. If the tag is left blank doxygen
# will generate a default style sheet. Note that doxygen will try to copy
# the style sheet file to the HTML output directory, so don't put your own
# stylesheet in the HTML output directory as well, or it will be erased!

HTML_STYLESHEET =

# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
# Doxygen will adjust the colors in the stylesheet and background images
# according to this color. Hue is specified as an angle on a colorwheel,
# see http://en.wikipedia.org/wiki/Hue for more information.
# For instance the value 0 represents red, 60 is yellow, 120 is green,
# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
# The allowed range is 0 to 359.

HTML_COLORSTYLE_HUE = 220

# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
# the colors in the HTML output. For a value of 0 the output will use
# grayscales only. A value of 255 will produce the most vivid colors.

HTML_COLORSTYLE_SAT = 100

# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
# the luminance component of the colors in the HTML output. Values below
# 100 gradually make the output lighter, whereas values above 100 make
# the output darker. The value divided by 100 is the actual gamma applied,
# so 80 represents a gamma of 0.8, the value 220 represents a gamma of 2.2,
# and 100 does not change the gamma.

HTML_COLORSTYLE_GAMMA = 80

# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated
# HTML page will contain the date and time when the page was generated.
# Setting this to NO can help when comparing the output of multiple runs.

HTML_TIMESTAMP = NO

# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
# files or namespaces will be aligned in HTML using tables. If set to
# NO a bullet list will be used.

HTML_ALIGN_MEMBERS = YES

# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
# documentation will contain sections that can be hidden and shown after the
# page has loaded. For this to work a browser that supports
# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox,
# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari).

HTML_DYNAMIC_SECTIONS = NO

# If the GENERATE_DOCSET tag is set to YES, additional index files
# will be generated that can be used as input for Apple's Xcode 3
# integrated development environment, introduced with OSX 10.5 (Leopard).
# To create a documentation set, doxygen will generate a Makefile in the
# HTML output directory. Running make will produce the docset in that
# directory and running "make install" will install the docset in
# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
# it at startup.
# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
# for more information.

GENERATE_DOCSET = NO

# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
# feed. A documentation feed provides an umbrella under which multiple
# documentation sets from a single provider (such as a company or product
# suite) can be grouped.
DOCSET_FEEDNAME = "Doxygen generated docs"

# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
# should uniquely identify the documentation set bundle. This should be a
# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
# will append .docset to the name.

DOCSET_BUNDLE_ID = org.doxygen.Project

# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely
# identify the documentation publisher. This should be a reverse domain-name
# style string, e.g. com.mycompany.MyDocSet.documentation.

DOCSET_PUBLISHER_ID = org.doxygen.Publisher

# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.

DOCSET_PUBLISHER_NAME = Publisher

# If the GENERATE_HTMLHELP tag is set to YES, additional index files
# will be generated that can be used as input for tools like the
# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
# of the generated HTML documentation.

GENERATE_HTMLHELP = NO

# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
# be used to specify the file name of the resulting .chm file. You
# can add a path in front of the file if the result should not be
# written to the html output directory.

CHM_FILE =

# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
# be used to specify the location (absolute path including file name) of
# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
# the HTML help compiler on the generated index.hhp.

HHC_LOCATION =

# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
# controls whether a separate .chi index file is generated (YES) or whether
# it should be included in the master .chm file (NO).

GENERATE_CHI = NO

# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
# is used to encode HtmlHelp index (hhk), content (hhc) and project file
# content.

CHM_INDEX_ENCODING =

# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
# controls whether a binary table of contents is generated (YES) or a
# normal table of contents (NO) in the .chm file.

BINARY_TOC = NO

# The TOC_EXPAND flag can be set to YES to add extra items for group members
# to the contents of the HTML help documentation and to the tree view.

TOC_EXPAND = NO

# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
# that can be used as input for Qt's qhelpgenerator to generate a
# Qt Compressed Help (.qch) of the generated HTML documentation.

GENERATE_QHP = NO

# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
# be used to specify the file name of the resulting .qch file.
# The path specified is relative to the HTML output folder.

QCH_FILE =

# The QHP_NAMESPACE tag specifies the namespace to use when generating
# Qt Help Project output. For more information please see
# http://doc.trolltech.com/qthelpproject.html#namespace

QHP_NAMESPACE =

# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
# Qt Help Project output. For more information please see
# http://doc.trolltech.com/qthelpproject.html#virtual-folders

QHP_VIRTUAL_FOLDER = doc

# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
# add. For more information please see
# http://doc.trolltech.com/qthelpproject.html#custom-filters

QHP_CUST_FILTER_NAME =

# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
# custom filter to add. For more information please see
# http://doc.trolltech.com/qthelpproject.html#custom-filters
# (Qt Help Project / Custom Filters).
QHP_CUST_FILTER_ATTRS =

# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
# project's filter section matches. For more information please see
# http://doc.trolltech.com/qthelpproject.html#filter-attributes
# (Qt Help Project / Filter Attributes).

QHP_SECT_FILTER_ATTRS =

# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
# be used to specify the location of Qt's qhelpgenerator.
# If non-empty doxygen will try to run qhelpgenerator on the generated
# .qhp file.

QHG_LOCATION =

# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
# will be generated, which together with the HTML files, form an Eclipse help
# plugin. To install this plugin and make it available under the help contents
# menu in Eclipse, the contents of the directory containing the HTML and XML
# files needs to be copied into the plugins directory of eclipse. The name of
# the directory within the plugins directory should be the same as
# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
# the help appears.

GENERATE_ECLIPSEHELP = NO

# A unique identifier for the eclipse help plugin. When installing the plugin
# the directory name containing the HTML and XML files should also have
# this name.

ECLIPSE_DOC_ID = org.doxygen.Project

# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
# top of each HTML page. The value NO (the default) enables the index and
# the value YES disables it.

DISABLE_INDEX = NO

# This tag can be used to set the number of enum values (range [0,1..20])
# that doxygen will group on one line in the generated HTML documentation.
# Note that a value of 0 will completely suppress the enum values from
# appearing in the overview section.

ENUM_VALUES_PER_LINE = 4

# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
# structure should be generated to display hierarchical information.
# If the tag value is set to YES, a side panel will be generated
# containing a tree-like index structure (just like the one that
# is generated for HTML Help). For this to work a browser that supports
# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
# Windows users are probably better off using the HTML help feature.

GENERATE_TREEVIEW = NONE

# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories,
# and Class Hierarchy pages using a tree view instead of an ordered list.

USE_INLINE_TREES = NO

# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
# used to set the initial width (in pixels) of the frame in which the tree
# is shown.

TREEVIEW_WIDTH = 250

# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
# links to external symbols imported via tag files in a separate window.

EXT_LINKS_IN_WINDOW = NO

# Use this tag to change the font size of Latex formulas included
# as images in the HTML documentation. The default is 10. Note that
# when you change the font size after a successful doxygen run you need
# to manually remove any form_*.png images from the HTML output directory
# to force them to be regenerated.

FORMULA_FONTSIZE = 10

# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
# generated for formulas are transparent PNGs. Transparent PNGs are
# not supported properly for IE 6.0, but are supported on all modern browsers.
# Note that when changing this option you need to delete any form_*.png files
# in the HTML output before the changes have effect.

FORMULA_TRANSPARENT = YES
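# The formulas these two options affect come from \f$ ... \f$ (inline) and
# \f[ ... \f] (display) markers in doc comments; a hypothetical sketch:
#
#   /**
#    * @brief Return the hash bucket for a key.
#    *
#    * The bucket index is \f$ h(k) \bmod n \f$, where \f$ n \f$ is the
#    * hash table size.
#    */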
# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
# (see http://www.mathjax.org) which uses client side Javascript for the
# rendering instead of using prerendered bitmaps. Use this if you do not
# have LaTeX installed or if you want formulas to look prettier in the HTML
# output. When enabled you also need to install MathJax separately and
# configure the path to it using the MATHJAX_RELPATH option.

USE_MATHJAX = NO

# When MathJax is enabled you need to specify the location relative to the
# HTML output directory using the MATHJAX_RELPATH option. The destination
# directory should contain the MathJax.js script. For instance, if the
# mathjax directory is located at the same level as the HTML output
# directory, then MATHJAX_RELPATH should be ../mathjax. The default value
# points to the mathjax.org site, so you can quickly see the result without
# installing MathJax, but it is strongly recommended to install a local copy
# of MathJax before deployment.

MATHJAX_RELPATH = http://www.mathjax.org/mathjax

# When the SEARCHENGINE tag is enabled doxygen will generate a search box
# for the HTML output. The underlying search engine uses javascript
# and DHTML and should work on any modern browser. Note that when using
# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
# (GENERATE_DOCSET) there is already a search function so this one should
# typically be disabled. For large projects the javascript based search
# engine can be slow; enabling SERVER_BASED_SEARCH may then provide a better
# solution.

SEARCHENGINE = NO

# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
# implemented using a PHP enabled web server instead of at the web client
# using Javascript. Doxygen will generate the search PHP script and index
# file to put on the web server. The advantage of the server
# based approach is that it scales better to large projects and allows
# full text search. The disadvantages are that it is more difficult to set
# up and does not have live searching capabilities.

SERVER_BASED_SEARCH = NO

#---------------------------------------------------------------------------
# configuration options related to the LaTeX output
#---------------------------------------------------------------------------

# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
# generate Latex output.

GENERATE_LATEX = YES

# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `latex' will be used as the default path.

LATEX_OUTPUT = latex

# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
# invoked. If left blank `latex' will be used as the default command name.
# Note that when enabling USE_PDFLATEX this option is only used for
# generating bitmaps for formulas in the HTML output, but not in the
# Makefile that is written to the output directory.

LATEX_CMD_NAME = latex

# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
# generate index for LaTeX. If left blank `makeindex' will be used as the
# default command name.

MAKEINDEX_CMD_NAME = makeindex

# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
# LaTeX documents. This may be useful for small projects and may help to
# save some trees in general.

COMPACT_LATEX = NO

# The PAPER_TYPE tag can be used to set the paper type that is used
# by the printer.
# Possible values are: a4, letter, legal and
# executive. If left blank a4wide will be used.

PAPER_TYPE = a4wide

# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
# packages that should be included in the LaTeX output.

EXTRA_PACKAGES =

# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
# the generated latex document. The header should contain everything until
# the first chapter. If it is left blank doxygen will generate a
# standard header. Notice: only use this tag if you know what you are doing!

LATEX_HEADER =

# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
# is prepared for conversion to pdf (using ps2pdf). The pdf file will
# contain links (just like the HTML output) instead of page references.
# This makes the output suitable for online browsing using a pdf viewer.

PDF_HYPERLINKS = YES

# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
# plain latex in the generated Makefile. Set this option to YES to get a
# higher quality PDF documentation.

USE_PDFLATEX = YES

# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
# command to the generated LaTeX files. This will instruct LaTeX to keep
# running if errors occur, instead of asking the user for help.
# This option is also used when generating formulas in HTML.

LATEX_BATCHMODE = NO

# If LATEX_HIDE_INDICES is set to YES then doxygen will not
# include the index chapters (such as File Index, Compound Index, etc.)
# in the output.

LATEX_HIDE_INDICES = NO

# If LATEX_SOURCE_CODE is set to YES then doxygen will include
# source code with syntax highlighting in the LaTeX output.
# Note that which sources are shown also depends on other settings
# such as SOURCE_BROWSER.

LATEX_SOURCE_CODE = NO

#---------------------------------------------------------------------------
# configuration options related to the RTF output
#---------------------------------------------------------------------------

# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output.
# The RTF output is optimized for Word 97 and may not look very pretty with
# other RTF readers or editors.

GENERATE_RTF = NO

# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `rtf' will be used as the default path.

RTF_OUTPUT = rtf

# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
# RTF documents. This may be useful for small projects and may help to
# save some trees in general.

COMPACT_RTF = NO

# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
# will contain hyperlink fields. The RTF file will
# contain links (just like the HTML output) instead of page references.
# This makes the output suitable for online browsing using WORD or other
# programs which support those fields.
# Note: wordpad (write) and others do not support links.

RTF_HYPERLINKS = NO

# Load stylesheet definitions from file. Syntax is similar to doxygen's
# config file, i.e. a series of assignments. You only have to provide
# replacements, missing definitions are set to their default value.

RTF_STYLESHEET_FILE =

# Set optional variables used in the generation of an rtf document.
# Syntax is similar to doxygen's config file.
RTF_EXTENSIONS_FILE =

#---------------------------------------------------------------------------
# configuration options related to the man page output
#---------------------------------------------------------------------------

# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
# generate man pages

GENERATE_MAN = YES

# The MAN_OUTPUT tag is used to specify where the man pages will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `man' will be used as the default path.

MAN_OUTPUT = man

# The MAN_EXTENSION tag determines the extension that is added to
# the generated man pages (default is the subroutine's section .3)

MAN_EXTENSION = .3

# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
# then it will generate one additional man file for each entity
# documented in the real man page(s). These additional files
# only source the real man page, but without them the man command
# would be unable to find the correct page. The default is NO.

MAN_LINKS = NO

#---------------------------------------------------------------------------
# configuration options related to the XML output
#---------------------------------------------------------------------------

# If the GENERATE_XML tag is set to YES Doxygen will
# generate an XML file that captures the structure of
# the code including all documentation.

GENERATE_XML = NO

# The XML_OUTPUT tag is used to specify where the XML pages will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `xml' will be used as the default path.

XML_OUTPUT = xml

# The XML_SCHEMA tag can be used to specify an XML schema,
# which can be used by a validating XML parser to check the
# syntax of the XML files.

XML_SCHEMA =

# The XML_DTD tag can be used to specify an XML DTD,
# which can be used by a validating XML parser to check the
# syntax of the XML files.

XML_DTD =

# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
# dump the program listings (including syntax highlighting
# and cross-referencing information) to the XML output. Note that
# enabling this will significantly increase the size of the XML output.

XML_PROGRAMLISTING = YES

#---------------------------------------------------------------------------
# configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------

# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
# generate an AutoGen Definitions (see autogen.sf.net) file
# that captures the structure of the code including all
# documentation. Note that this feature is still experimental
# and incomplete at the moment.

GENERATE_AUTOGEN_DEF = NO

#---------------------------------------------------------------------------
# configuration options related to the Perl module output
#---------------------------------------------------------------------------

# If the GENERATE_PERLMOD tag is set to YES Doxygen will
# generate a Perl module file that captures the structure of
# the code including all documentation. Note that this
# feature is still experimental and incomplete at the
# moment.

GENERATE_PERLMOD = NO

# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
# the necessary Makefile rules, Perl scripts and LaTeX code to be able
# to generate PDF and DVI output from the Perl module output.
PERLMOD_LATEX = NO

# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
# nicely formatted so it can be parsed by a human reader. This is useful
# if you want to understand what is going on. On the other hand, if this
# tag is set to NO the size of the Perl module output will be much smaller
# and Perl will parse it just the same.

PERLMOD_PRETTY = YES

# The names of the make variables in the generated doxyrules.make file
# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
# This is useful so different doxyrules.make files included by the same
# Makefile don't overwrite each other's variables.

PERLMOD_MAKEVAR_PREFIX =

#---------------------------------------------------------------------------
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------

# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
# evaluate all C-preprocessor directives found in the sources and include
# files.

ENABLE_PREPROCESSING = YES

# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
# names in the source code. If set to NO (the default) only conditional
# compilation will be performed. Macro expansion can be done in a controlled
# way by setting EXPAND_ONLY_PREDEF to YES.

MACRO_EXPANSION = YES

# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
# then the macro expansion is limited to the macros specified with the
# PREDEFINED and EXPAND_AS_DEFINED tags.

EXPAND_ONLY_PREDEF = YES

# If the SEARCH_INCLUDES tag is set to YES (the default) the include files
# in the INCLUDE_PATH (see below) will be searched if a #include is found.

SEARCH_INCLUDES = YES

# The INCLUDE_PATH tag can be used to specify one or more directories that
# contain include files that are not input files but should be processed by
# the preprocessor.

INCLUDE_PATH =

# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
# patterns (like *.h and *.hpp) to filter out the header-files in the
# directories. If left blank, the patterns specified with FILE_PATTERNS will
# be used.

INCLUDE_FILE_PATTERNS =

# The PREDEFINED tag can be used to specify one or more macro names that
# are defined before the preprocessor is started (similar to the -D option of
# gcc). The argument of the tag is a list of macros of the form: name
# or name=definition (no spaces). If the definition and the = are
# omitted =1 is assumed. To prevent a macro definition from being
# undefined via #undef or recursively expanded use the := operator
# instead of the = operator.

PREDEFINED = DOXYGEN \
             PRINTF_ATTRIBUTE(x,y)=

# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
# this tag can be used to specify a list of macro names that should be
# expanded. The macro definition that is found in the sources will be used.
# Use the PREDEFINED tag if you want to use a different macro definition
# that overrules the definition found in the source code.

EXPAND_AS_DEFINED =

# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
# doxygen's preprocessor will remove all references to function-like macros
# that are alone on a line, have an all uppercase name, and do not end with a
# semicolon, because these will confuse the parser if not removed.

SKIP_FUNCTION_MACROS = YES
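# As a sketch of why this project predefines PRINTF_ATTRIBUTE(x,y)= above
# (the function name below is hypothetical): with MACRO_EXPANSION and
# EXPAND_ONLY_PREDEF enabled, doxygen expands the attribute macro to nothing
# and sees a clean prototype, while gcc builds keep the printf-format check:
#
#   /* gcc (__GNUC__ >= 3) gets the real format attribute; for doxygen the
#    * PREDEFINED entry blanks PRINTF_ATTRIBUTE out entirely. */
#   void example_log(struct tdb_context *tdb, enum tdb_debug_level level,
#                    const char *fmt, ...) PRINTF_ATTRIBUTE(3, 4);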
#---------------------------------------------------------------------------
# Configuration::additions related to external references
#---------------------------------------------------------------------------

# The TAGFILES option can be used to specify one or more tagfiles.
# Optionally an initial location of the external documentation
# can be added for each tagfile. The format of a tag file without
# this location is as follows:
#
# TAGFILES = file1 file2 ...
#
# Adding location for the tag files is done as follows:
#
# TAGFILES = file1=loc1 "file2 = loc2" ...
#
# where "loc1" and "loc2" can be relative or absolute paths or
# URLs. If a location is present for each tag, the installdox tool
# does not have to be run to correct the links.
# Note that each tag file must have a unique name
# (where the name does NOT include the path)
# If a tag file is not located in the directory in which doxygen
# is run, you must also specify the path to the tagfile here.

TAGFILES =

# When a file name is specified after GENERATE_TAGFILE, doxygen will create
# a tag file that is based on the input files it reads.

GENERATE_TAGFILE =

# If the ALLEXTERNALS tag is set to YES all external classes will be listed
# in the class index. If set to NO only the inherited external classes
# will be listed.

ALLEXTERNALS = NO

# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
# in the modules index. If set to NO, only the current project's groups will
# be listed.

EXTERNAL_GROUPS = YES

# The PERL_PATH should be the absolute path and name of the perl script
# interpreter (i.e. the result of `which perl').

PERL_PATH = /usr/bin/perl

#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------

# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with
# base or super classes. Setting the tag to NO turns the diagrams off. Note
# that this option also works with HAVE_DOT disabled, but it is recommended
# to install and use dot, since it yields more powerful graphs.

CLASS_DIAGRAMS = YES

# You can define message sequence charts within doxygen comments using the
# \msc command. Doxygen will then run the mscgen tool (see
# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in
# the documentation. The MSCGEN_PATH tag allows you to specify the directory
# where the mscgen tool resides. If left empty the tool is assumed to be
# found in the default search path.

MSCGEN_PATH =

# If set to YES, the inheritance and collaboration graphs will hide
# inheritance and usage relations if the target is undocumented
# or is not a class.

HIDE_UNDOC_RELATIONS = YES

# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
# available from the path. This tool is part of Graphviz, a graph
# visualization toolkit from AT&T and Lucent Bell Labs. The other options in
# this section have no effect if this option is set to NO (the default)

HAVE_DOT = NO

# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
# allowed to run in parallel. When set to 0 (the default) doxygen will
# base this on the number of processors available in the system. You can set
# it explicitly to a value larger than 0 to get control over the balance
# between CPU load and processing speed.
DOT_NUM_THREADS = 0

# By default doxygen will write a font called Helvetica to the output
# directory and reference it in all dot files that doxygen generates.
# When you want a differently looking font you can specify the font name
# using DOT_FONTNAME. You need to make sure dot is able to find the font,
# which can be done by putting it in a standard location or by setting the
# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
# directory containing the font.

DOT_FONTNAME = FreeSans

# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
# The default size is 10pt.

DOT_FONTSIZE = 10

# By default doxygen will tell dot to use the output directory to look for
# the FreeSans.ttf font (which doxygen will put there itself). If you specify
# a different font using DOT_FONTNAME you can set the path where dot
# can find it using this tag.

DOT_FONTPATH =

# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
# will generate a graph for each documented class showing the direct and
# indirect inheritance relations. Setting this tag to YES will force the
# CLASS_DIAGRAMS tag to NO.

CLASS_GRAPH = YES

# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
# will generate a graph for each documented class showing the direct and
# indirect implementation dependencies (inheritance, containment, and
# class reference variables) of the class with other documented classes.

COLLABORATION_GRAPH = YES

# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
# will generate a graph for groups, showing the direct groups dependencies

GROUP_GRAPHS = YES

# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
# collaboration diagrams in a style similar to the OMG's Unified Modeling
# Language.

UML_LOOK = NO

# If set to YES, the inheritance and collaboration graphs will show the
# relations between templates and their instances.

TEMPLATE_RELATIONS = NO

# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
# tags are set to YES then doxygen will generate a graph for each documented
# file showing the direct and indirect include dependencies of the file with
# other documented files.

INCLUDE_GRAPH = YES

# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
# documented header file showing the documented files that directly or
# indirectly include this file.

INCLUDED_BY_GRAPH = YES

# If the CALL_GRAPH and HAVE_DOT options are set to YES then
# doxygen will generate a call dependency graph for every global function
# or class method. Note that enabling this option will significantly increase
# the time of a run. So in most cases it will be better to enable call graphs
# for selected functions only using the \callgraph command.

CALL_GRAPH = NO

# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
# doxygen will generate a caller dependency graph for every global function
# or class method. Note that enabling this option will significantly increase
# the time of a run. So in most cases it will be better to enable caller
# graphs for selected functions only using the \callergraph command.

CALLER_GRAPH = NO

# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
# will generate a graphical hierarchy of all classes instead of a textual
# one.
GRAPHICAL_HIERARCHY = YES

# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
# then doxygen will show the dependencies a directory has on other directories
# in a graphical way. The dependency relations are determined by the #include
# relations between the files in the directories.

DIRECTORY_GRAPH = YES

# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
# generated by dot. Possible values are png, jpg, gif or svg.
# If left blank png will be used.

DOT_IMAGE_FORMAT = png

# The tag DOT_PATH can be used to specify the path where the dot tool can be
# found. If left blank, it is assumed the dot tool can be found in the path.

DOT_PATH =

# The DOTFILE_DIRS tag can be used to specify one or more directories that
# contain dot files that are included in the documentation (see the
# \dotfile command).

DOTFILE_DIRS =

# The MSCFILE_DIRS tag can be used to specify one or more directories that
# contain msc files that are included in the documentation (see the
# \mscfile command).

MSCFILE_DIRS =

# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
# nodes that will be shown in the graph. If the number of nodes in a graph
# becomes larger than this value, doxygen will truncate the graph, which is
# visualized by representing a node as a red box. Note that if the
# number of direct children of the root node in a graph is already larger
# than DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.

DOT_GRAPH_MAX_NODES = 50

# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
# graphs generated by dot. A depth value of 3 means that only nodes reachable
# from the root by following a path via at most 3 edges will be shown. Nodes
# that lay further from the root node will be omitted. Note that setting this
# option to 1 or 2 may greatly reduce the computation time needed for large
# code bases. Also note that the size of a graph can be further restricted by
# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.

MAX_DOT_GRAPH_DEPTH = 0

# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
# background. This is disabled by default, because dot on Windows does not
# seem to support this out of the box. Warning: Depending on the platform
# used, enabling this option may lead to badly anti-aliased labels on the
# edges of a graph (i.e. they become hard to read).

DOT_TRANSPARENT = YES

# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple
# output files in one run (i.e. multiple -o and -T options on the command
# line). This makes dot run faster, but since only newer versions of dot
# (>1.8.10) support this, this feature is disabled by default.

DOT_MULTI_TARGETS = NO

# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
# generate a legend page explaining the meaning of the various boxes and
# arrows in the dot generated graphs.

GENERATE_LEGEND = YES

# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
# remove the intermediate dot files that are used to generate
# the various graphs.

DOT_CLEANUP = YES
tdb-1.4.2/include/tdb.h0000660000000000000000000010203513444661620014577 0ustar rootroot00000000000000#ifndef __TDB_H__
#define __TDB_H__

/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Andrew Tridgell 1999-2004

   ** NOTE! The following LGPL license applies to the tdb
   ** library.
   ** This does NOT imply that all of Samba is released
   ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see
   <http://www.gnu.org/licenses/>.
*/

#ifdef __cplusplus
extern "C" {
#endif

#include <signal.h>
#include <stdbool.h>

/**
 * @defgroup tdb The tdb API
 *
 * tdb is a Trivial database. In concept, it is very much like GDBM, and BSD's
 * DB except that it allows multiple simultaneous writers and uses locking
 * internally to keep writers from trampling on each other. tdb is also
 * extremely small.
 *
 * @section tdb_interface Interface
 *
 * The interface is very similar to gdbm except for the following:
 *
 * <ul>
 * <li>different open interface. The tdb_open call is more similar to a
 *     traditional open()</li>
 * <li>no tdbm_reorganise() function</li>
 * <li>no tdbm_sync() function. No operations are cached in the library
 *     anyway</li>
 * <li>added a tdb_traverse() function for traversing the whole database</li>
 * <li>added transactions support</li>
 * </ul>
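 *
 * For instance, a typical open call looks like this (an illustrative sketch
 * only; error handling omitted):
 * @code
 * struct tdb_context *db = tdb_open("example.tdb", 0, TDB_DEFAULT,
 *                                   O_RDWR | O_CREAT, 0600);
 * @endcode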
 *
 * A general rule for using tdb is that the caller frees any returned TDB_DATA
 * structures. Just call free(p.dptr) to free a TDB_DATA return value called
 * p. This is the same as gdbm.
 *
 * @{
 */

/** Flags to tdb_store() */
#define TDB_REPLACE 1		/** Unused */
#define TDB_INSERT 2		/** Don't overwrite an existing entry */
#define TDB_MODIFY 3		/** Don't create an existing entry */

/** Flags for tdb_open() */
#define TDB_DEFAULT 0 /** just a readability place holder */
#define TDB_CLEAR_IF_FIRST 1 /** If this is the first open, wipe the db */
#define TDB_INTERNAL 2 /** Don't store on disk */
#define TDB_NOLOCK   4 /** Don't do any locking */
#define TDB_NOMMAP   8 /** Don't use mmap */
#define TDB_CONVERT 16 /** Convert endian (internal use) */
#define TDB_BIGENDIAN 32 /** Header is big-endian (internal use) */
#define TDB_NOSYNC   64 /** Don't use synchronous transactions */
#define TDB_SEQNUM   128 /** Maintain a sequence number */
#define TDB_VOLATILE   256 /** Activate the per-hashchain freelist, default 5 */
#define TDB_ALLOW_NESTING 512 /** Allow transactions to nest */
#define TDB_DISALLOW_NESTING 1024 /** Disallow transactions to nest */
#define TDB_INCOMPATIBLE_HASH 2048 /** Better hashing: can't be opened by tdb < 1.2.6. */
#define TDB_MUTEX_LOCKING 4096 /** optimized locking using robust mutexes if supported,
				   only with tdb >= 1.3.0 and TDB_CLEAR_IF_FIRST
				   after checking tdb_runtime_check_for_robust_mutexes() */

/** The tdb error codes */
enum TDB_ERROR {TDB_SUCCESS=0, TDB_ERR_CORRUPT, TDB_ERR_IO, TDB_ERR_LOCK,
		TDB_ERR_OOM, TDB_ERR_EXISTS, TDB_ERR_NOLOCK,
		TDB_ERR_LOCK_TIMEOUT, TDB_ERR_NOEXIST, TDB_ERR_EINVAL,
		TDB_ERR_RDONLY, TDB_ERR_NESTING};

/** Debugging uses one of the following levels */
enum tdb_debug_level {TDB_DEBUG_FATAL = 0, TDB_DEBUG_ERROR,
		      TDB_DEBUG_WARNING, TDB_DEBUG_TRACE};

/** The tdb data structure */
typedef struct TDB_DATA {
	unsigned char *dptr;
	size_t dsize;
} TDB_DATA;

#ifndef PRINTF_ATTRIBUTE
#if (__GNUC__ >= 3)
/** Use gcc attribute to check printf fns.  a1 is the 1-based index of
 * the parameter containing the format, and a2 the index of the first
 * argument. Note that some gcc 2.x versions don't handle this
 * properly **/
#define PRINTF_ATTRIBUTE(a1, a2) __attribute__ ((format (__printf__, a1, a2)))
#else
#define PRINTF_ATTRIBUTE(a1, a2)
#endif
#endif

/** This is the context structure that is returned from a db open. */
typedef struct tdb_context TDB_CONTEXT;

typedef int (*tdb_traverse_func)(struct tdb_context *, TDB_DATA, TDB_DATA,
				 void *);
typedef void (*tdb_log_func)(struct tdb_context *, enum tdb_debug_level,
			     const char *, ...) PRINTF_ATTRIBUTE(3, 4);
typedef unsigned int (*tdb_hash_func)(TDB_DATA *key);

struct tdb_logging_context {
	tdb_log_func log_fn;
	void *log_private;
};

/**
 * @brief Open the database, creating it if necessary.
 *
 * @param[in] name     The name of the db to open.
 *
 * @param[in] hash_size The hash size is advisory, use zero for a default
 *                      value.
 *
 * @param[in] tdb_flags The flags to use to open the db:\n\n
 *                      TDB_CLEAR_IF_FIRST - Clear database if we are the
 *                                           only one with it open\n
 *                      TDB_INTERNAL - Don't use a file, instead store the
 *                                     data in memory.
The filename is * ignored in this case.\n * TDB_NOLOCK - Don't do any locking\n * TDB_NOMMAP - Don't use mmap\n * TDB_NOSYNC - Don't synchronise transactions to disk\n * TDB_SEQNUM - Maintain a sequence number\n * TDB_VOLATILE - activate the per-hashchain freelist, * default 5.\n * TDB_ALLOW_NESTING - Allow transactions to nest.\n * TDB_DISALLOW_NESTING - Disallow transactions to nest.\n * TDB_INCOMPATIBLE_HASH - Better hashing: can't be opened by tdb < 1.2.6.\n * TDB_MUTEX_LOCKING - Optimized locking using robust mutexes if supported, * can't be opened by tdb < 1.3.0. * Only valid in combination with TDB_CLEAR_IF_FIRST * after checking tdb_runtime_check_for_robust_mutexes()\n * * @param[in] open_flags Flags for the open(2) function. * * @param[in] mode The mode for the open(2) function. * * @return A tdb context structure, NULL on error. */ struct tdb_context *tdb_open(const char *name, int hash_size, int tdb_flags, int open_flags, mode_t mode); /** * @brief Open the database and creating it if necessary. * * This is like tdb_open(), but allows you to pass an initial logging and * hash function. Be careful when passing a hash function - all users of the * database must use the same hash function or you will get data corruption. * * @param[in] name The name of the db to open. * * @param[in] hash_size The hash size is advisory, use zero for a default * value. * * @param[in] tdb_flags The flags to use to open the db:\n\n * TDB_CLEAR_IF_FIRST - Clear database if we are the * only one with it open\n * TDB_INTERNAL - Don't use a file, instead store the * data in memory. The filename is * ignored in this case.\n * TDB_NOLOCK - Don't do any locking\n * TDB_NOMMAP - Don't use mmap\n * TDB_NOSYNC - Don't synchronise transactions to disk\n * TDB_SEQNUM - Maintain a sequence number\n * TDB_VOLATILE - activate the per-hashchain freelist, * default 5.\n * TDB_ALLOW_NESTING - Allow transactions to nest.\n * TDB_DISALLOW_NESTING - Disallow transactions to nest.\n * TDB_INCOMPATIBLE_HASH - Better hashing: can't be opened by tdb < 1.2.6.\n * TDB_MUTEX_LOCKING - Optimized locking using robust mutexes if supported, * can't be opened by tdb < 1.3.0. * Only valid in combination with TDB_CLEAR_IF_FIRST * after checking tdb_runtime_check_for_robust_mutexes()\n * * @param[in] open_flags Flags for the open(2) function. * * @param[in] mode The mode for the open(2) function. * * @param[in] log_ctx The logging function to use. * * @param[in] hash_fn The hash function you want to use. * * @return A tdb context structure, NULL on error. * * @see tdb_open() */ struct tdb_context *tdb_open_ex(const char *name, int hash_size, int tdb_flags, int open_flags, mode_t mode, const struct tdb_logging_context *log_ctx, tdb_hash_func hash_fn); /** * @brief Set the maximum number of dead records per hash chain. * * @param[in] tdb The database handle to set the maximum. * * @param[in] max_dead The maximum number of dead records per hash chain. */ void tdb_set_max_dead(struct tdb_context *tdb, int max_dead); /** * @brief Reopen a tdb. * * This can be used after a fork to ensure that we have an independent seek * pointer from our parent and to re-establish locks. * * @param[in] tdb The database to reopen. It will be free'd on error! * * @return 0 on success, -1 on error. * * @note Don't call tdb_error() after this function cause the tdb context will * be freed on error. */ int tdb_reopen(struct tdb_context *tdb); /** * @brief Reopen all tdb's * * If the parent is longlived (ie. 
a parent daemon architecture), we know it * will keep it's active lock on a tdb opened with CLEAR_IF_FIRST. Thus for * child processes we don't have to add an active lock. This is essential to * improve performance on systems that keep POSIX locks as a non-scalable data * structure in the kernel. * * @param[in] parent_longlived Whether the parent is longlived or not. * * @return 0 on success, -1 on error. */ int tdb_reopen_all(int parent_longlived); /** * @brief Set a different tdb logging function. * * @param[in] tdb The tdb to set the logging function. * * @param[in] log_ctx The logging function to set. */ void tdb_set_logging_function(struct tdb_context *tdb, const struct tdb_logging_context *log_ctx); /** * @brief Get the tdb last error code. * * @param[in] tdb The tdb to get the error code from. * * @return A TDB_ERROR code. * * @see TDB_ERROR */ enum TDB_ERROR tdb_error(struct tdb_context *tdb); /** * @brief Get a error string for the last tdb error * * @param[in] tdb The tdb to get the error code from. * * @return An error string. */ const char *tdb_errorstr(struct tdb_context *tdb); /** * @brief Fetch an entry in the database given a key. * * The caller must free the resulting data. * * @param[in] tdb The tdb to fetch the key. * * @param[in] key The key to fetch. * * @return The key entry found in the database, NULL on error with * TDB_ERROR set. * * @see tdb_error() * @see tdb_errorstr() */ TDB_DATA tdb_fetch(struct tdb_context *tdb, TDB_DATA key); /** * @brief Hand a record to a parser function without allocating it. * * This function is meant as a fast tdb_fetch alternative for large records * that are frequently read. The "key" and "data" arguments point directly * into the tdb shared memory, they are not aligned at any boundary. * * @warning The parser is called while tdb holds a lock on the record. DO NOT * call other tdb routines from within the parser. Also, for good performance * you should make the parser fast to allow parallel operations. * * @param[in] tdb The tdb to parse the record. * * @param[in] key The key to parse. * * @param[in] parser The parser to use to parse the data. * * @param[in] private_data A private data pointer which is passed to the parser * function. * * @return -1 if the record was not found. If the record was found, * the return value of "parser" is passed up to the caller. */ int tdb_parse_record(struct tdb_context *tdb, TDB_DATA key, int (*parser)(TDB_DATA key, TDB_DATA data, void *private_data), void *private_data); /** * @brief Delete an entry in the database given a key. * * @param[in] tdb The tdb to delete the key. * * @param[in] key The key to delete. * * @return 0 on success, -1 if the key doesn't exist. */ int tdb_delete(struct tdb_context *tdb, TDB_DATA key); /** * @brief Store an element in the database. * * This replaces any existing element with the same key. * * @param[in] tdb The tdb to store the entry. * * @param[in] key The key to use to store the entry. * * @param[in] dbuf The data to store under the key. * * @param[in] flag The flags to store the key:\n\n * TDB_INSERT: Don't overwrite an existing entry.\n * TDB_MODIFY: Don't create a new entry\n * * @return 0 on success, -1 on error with error code set. * * @see tdb_error() * @see tdb_errorstr() */ int tdb_store(struct tdb_context *tdb, TDB_DATA key, TDB_DATA dbuf, int flag); /** * @brief Store an element in the database. * * This replaces any existing element with the same key. * * @param[in] tdb The tdb to store the entry. * * @param[in] key The key to use to store the entry. 
* * @param[in] dbufs A vector of memory chunks to write * * @param[in] num_dbufs Length of the dbufs vector * * @param[in] flag The flags to store the key:\n\n * TDB_INSERT: Don't overwrite an existing entry.\n * TDB_MODIFY: Don't create a new entry\n * * @return 0 on success, -1 on error with error code set. * * @see tdb_error() * @see tdb_errorstr() */ int tdb_storev(struct tdb_context *tdb, TDB_DATA key, const TDB_DATA *dbufs, int num_dbufs, int flag); /** * @brief Append data to an entry. * * If the entry doesn't exist, it will create a new one. * * @param[in] tdb The database to use. * * @param[in] key The key to append the data. * * @param[in] new_dbuf The data to append to the key. * * @return 0 on success, -1 on error with error code set. * * @see tdb_error() * @see tdb_errorstr() */ int tdb_append(struct tdb_context *tdb, TDB_DATA key, TDB_DATA new_dbuf); /** * @brief Close a database. * * @param[in] tdb The database to close. The context will be free'd. * * @return 0 for success, -1 on error. * * @note Don't call tdb_error() after this function cause the tdb context will * be freed on error. */ int tdb_close(struct tdb_context *tdb); /** * @brief Find the first entry in the database and return its key. * * The caller must free the returned data. * * @param[in] tdb The database to use. * * @return The first entry of the database, an empty TDB_DATA entry * if the database is empty. */ TDB_DATA tdb_firstkey(struct tdb_context *tdb); /** * @brief Find the next entry in the database, returning its key. * * The caller must free the returned data. * * @param[in] tdb The database to use. * * @param[in] key The key from which you want the next key. * * @return The next entry of the current key, an empty TDB_DATA * entry if there is no entry. */ TDB_DATA tdb_nextkey(struct tdb_context *tdb, TDB_DATA key); /** * @brief Traverse the entire database. * * While traversing the function fn(tdb, key, data, state) is called on each * element. If fn is NULL then it is not called. A non-zero return value from * fn() indicates that the traversal should stop. Traversal callbacks may not * start transactions. * * @warning The data buffer given to the callback fn does NOT meet the alignment * restrictions malloc gives you. * * @param[in] tdb The database to traverse. * * @param[in] fn The function to call on each entry. * * @param[in] private_data The private data which should be passed to the * traversing function. * * @return The record count traversed, -1 on error. */ int tdb_traverse(struct tdb_context *tdb, tdb_traverse_func fn, void *private_data); /** * @brief Traverse the entire database. * * While traversing the database the function fn(tdb, key, data, state) is * called on each element, but marking the database read only during the * traversal, so any write operations will fail. This allows tdb to use read * locks, which increases the parallelism possible during the traversal. * * @param[in] tdb The database to traverse. * * @param[in] fn The function to call on each entry. * * @param[in] private_data The private data which should be passed to the * traversing function. * * @return The record count traversed, -1 on error. */ int tdb_traverse_read(struct tdb_context *tdb, tdb_traverse_func fn, void *private_data); /** * @brief Traverse a single hash chain * * Traverse a single hash chain under a single lock operation. No * database modification is possible in the callback. * * This exists for background cleanup of databases. 
In normal * operations, traversing a complete database can be much too * expensive. Databases can have many chains, which will all have to * be looked at before tdb_traverse finishes. Also tdb_traverse does a * lot of fcntl activity to protect against concurrent record deletes. * * With this you can walk a fraction of the whole tdb, collect the * entries you want to prune, leave the traverse, and then modify or * delete the records in a subsequent step. * * To walk the entire database, call this function tdb_hash_size() * times, with 0<=chain 2015-04-25 tdbbackup 8 Samba System Administration tools 3.6 tdbbackup tool for backing up and for validating the integrity of samba .tdb files tdbbackup -s suffix -v -h -n hashsize -l DESCRIPTION This tool is part of the samba 1 suite. tdbbackup is a tool that may be used to backup samba .tdb files. This tool may also be used to verify the integrity of the .tdb files prior to samba startup or during normal operation. If it finds file damage and it finds a prior backup the backup file will be restored. OPTIONS -h Get help information. -s suffix The -s option allows the administrator to specify a file backup extension. This way it is possible to keep a history of tdb backup files by using a new suffix for each backup. -v The -v will check the database for damages (corrupt data) which if detected causes the backup to be restored. -n hashsize The -n option sets the hash size for the new backup tdb. -l This options disables any locking, by passing TDB_NOLOCK to tdb_open_ex(). Only use this for database files which are not used by any other process! And also only if it is otherwise not possible to open the database, e.g. databases which were created with mutex locking. COMMANDS GENERAL INFORMATION The tdbbackup utility can safely be run at any time. It was designed so that it can be used at any time to validate the integrity of tdb files, even during Samba operation. Typical usage for the command will be: tdbbackup [-s suffix] *.tdb Before restarting samba the following command may be run to validate .tdb files: tdbbackup -v [-s suffix] *.tdb Samba .tdb files are stored in various locations, be sure to run backup all .tdb file on the system. Important files includes: secrets.tdb - usual location is in the /usr/local/samba/private directory, or on some systems in /etc/samba. passdb.tdb - usual location is in the /usr/local/samba/private directory, or on some systems in /etc/samba. *.tdb located in the /usr/local/samba/var directory or on some systems in the /var/cache or /var/lib/samba directories. VERSION This man page is correct for version 3 of the Samba suite. AUTHOR The original Samba software and related utilities were created by Andrew Tridgell. Samba is now developed by the Samba Team as an Open Source project similar to the way the Linux kernel is developed. The tdbbackup man page was written by John H Terpstra. tdb-1.4.2/man/tdbdump.8.xml0000660000000000000000000000470412520121120015314 0ustar rootroot00000000000000 2015-04-25 tdbdump 8 Samba System Administration tools 3.6 tdbdump tool for printing the contents of a TDB file tdbdump -k keyname -e -h filename DESCRIPTION This tool is part of the samba 1 suite. tdbdump is a very simple utility that 'dumps' the contents of a TDB (Trivial DataBase) file to standard output in a human-readable format. This tool can be used when debugging problems with TDB files. It is intended for those who are somewhat familiar with Samba internals. OPTIONS -h Get help information. 
-k keyname The -k option restricts dumping to a single key, if found. -e The -e tries to dump out from a corrupt database. Naturally, such a dump is unreliable, at best. VERSION This man page is correct for version 3 of the Samba suite. AUTHOR The original Samba software and related utilities were created by Andrew Tridgell. Samba is now developed by the Samba Team as an Open Source project similar to the way the Linux kernel is developed. The tdbdump man page was written by Jelmer Vernooij. tdb-1.4.2/man/tdbrestore.8.xml0000660000000000000000000000367312520121120016036 0ustar rootroot00000000000000 2015-04-25 tdbrestore 8 Samba System Administration tools 3.6 tdbrestore tool for creating a TDB file out of a tdbdump output tdbrestore tdbfilename DESCRIPTION This tool is part of the samba 1 suite. tdbrestore is a very simple utility that 'restores' the contents of dump file into TDB (Trivial DataBase) file. The dump file is obtained from the tdbdump command. This tool wait on the standard input for the content of the dump and will write the tdb in the tdbfilename parameter. This tool can be used for unpacking the content of tdb as backup mean. VERSION This man page is correct for version 3 of the Samba suite. AUTHOR The original Samba software and related utilities were created by Andrew Tridgell. Samba is now developed by the Samba Team as an Open Source project similar to the way the Linux kernel is developed. This tool was initially written by Volker Lendecke based on an idea by Simon McVittie. The tdbrestore man page was written by Matthieu Patou. tdb-1.4.2/man/tdbtool.8.xml0000660000000000000000000001466713100601766015353 0ustar rootroot00000000000000 2015-04-25 tdbtool 8 Samba System Administration tools 4.0 tdbtool manipulate the contents TDB files tdbtool tdbtool -l TDBFILE COMMANDS DESCRIPTION This tool is part of the samba 1 suite. tdbtool a tool for displaying and altering the contents of Samba TDB (Trivial DataBase) files. Each of the commands listed below can be entered interactively or provided on the command line. OPTIONS -l This options disables any locking, by passing TDB_NOLOCK to tdb_open_ex(). Only use this for database files which are not used by any other process! And also only if it is otherwise not possible to open the database, e.g. databases which were created with mutex locking. COMMANDS TDBFILE Create a new database named TDBFILE. TDBFILE Open an existing database named TDBFILE. Erase the current database. Dump the current database as strings. Dump the current database as connection records. Dump the current database keys as strings. Dump the current database keys as hex values. Print summary information about the current database. KEY DATA Insert a record into the current database. KEY TDBFILE Move a record from the current database into TDBFILE. KEY DATA Store (replace) a record in the current database. KEY DATA Store (replace) a record in the current database where key and data are in hex format. KEY Show a record by key. KEY Delete a record by key. Print the current database hash table and free list. Print the current database and free list. COMMAND Execute the given system command. Print the first record in the current database. Print the next record in the current database. Check the integrity of the current database. Repack a database using a temporary file to remove fragmentation. Exit tdbtool. CAVEATS The contents of the Samba TDB files are private to the implementation and should not be altered with tdbtool. 
VERSION This man page is correct for version 3.6 of the Samba suite. AUTHOR The original Samba software and related utilities were created by Andrew Tridgell. Samba is now developed by the Samba Team as an Open Source project similar to the way the Linux kernel is developed. tdb-1.4.2/pytdb.c0000660000000000000000000005153213527011454013521 0ustar rootroot00000000000000/* Unix SMB/CIFS implementation. Python interface to tdb. Copyright (C) 2004-2006 Tim Potter Copyright (C) 2007-2008 Jelmer Vernooij ** NOTE! The following LGPL license applies to the tdb ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include #include "replace.h" #include "system/filesys.h" /* Include tdb headers */ #include #if PY_MAJOR_VERSION >= 3 #define PyInt_FromLong PyLong_FromLong #define PyInt_Check PyLong_Check #define PyInt_AsLong PyLong_AsLong #define Py_TPFLAGS_HAVE_ITER 0 #endif /* discard signature of 'func' in favour of 'target_sig' */ #define PY_DISCARD_FUNC_SIG(target_sig, func) (target_sig)(void(*)(void))func typedef struct { PyObject_HEAD TDB_CONTEXT *ctx; bool closed; } PyTdbObject; static PyTypeObject PyTdb; static void PyErr_SetTDBError(TDB_CONTEXT *tdb) { PyErr_SetObject(PyExc_RuntimeError, Py_BuildValue("(i,s)", tdb_error(tdb), tdb_errorstr(tdb))); } static TDB_DATA PyBytes_AsTDB_DATA(PyObject *data) { TDB_DATA ret; ret.dptr = (unsigned char *)PyBytes_AsString(data); ret.dsize = PyBytes_Size(data); return ret; } static PyObject *PyBytes_FromTDB_DATA(TDB_DATA data) { if (data.dptr == NULL && data.dsize == 0) { Py_RETURN_NONE; } else { PyObject *ret = PyBytes_FromStringAndSize((const char *)data.dptr, data.dsize); free(data.dptr); return ret; } } #define PyErr_TDB_ERROR_IS_ERR_RAISE(ret, tdb) \ if (ret != 0) { \ PyErr_SetTDBError(tdb); \ return NULL; \ } #define PyErr_TDB_RAISE_IF_CLOSED(self) \ if (self->closed) { \ PyErr_SetObject(PyExc_RuntimeError, \ Py_BuildValue("(i,s)", TDB_ERR_IO, "Database is already closed")); \ return NULL; \ } #define PyErr_TDB_RAISE_RETURN_MINUS_1_IF_CLOSED(self) \ if (self->closed) { \ PyErr_SetObject(PyExc_RuntimeError, \ Py_BuildValue("(i,s)", TDB_ERR_IO, "Database is already closed")); \ return -1; \ } static PyObject *py_tdb_open(PyTypeObject *type, PyObject *args, PyObject *kwargs) { char *name = NULL; int hash_size = 0, tdb_flags = TDB_DEFAULT, flags = O_RDWR, mode = 0600; TDB_CONTEXT *ctx; PyTdbObject *ret; const char *_kwnames[] = { "name", "hash_size", "tdb_flags", "flags", "mode", NULL }; char **kwnames = discard_const_p(char *, _kwnames); if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|siiii", kwnames, &name, &hash_size, &tdb_flags, &flags, &mode)) return NULL; if (name == NULL) { tdb_flags |= TDB_INTERNAL; } ctx = tdb_open(name, hash_size, tdb_flags, flags, mode); if (ctx == NULL) { PyErr_SetFromErrno(PyExc_IOError); return NULL; } ret = PyObject_New(PyTdbObject, &PyTdb); if (!ret) { tdb_close(ctx); return NULL; } ret->ctx = ctx; 
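	/* The new Python object now owns the tdb context; "closed" gates
	 * every later method via the PyErr_TDB_RAISE_IF_CLOSED checks. */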
ret->closed = false; return (PyObject *)ret; } static PyObject *obj_transaction_cancel(PyTdbObject *self, PyObject *Py_UNUSED(ignored)) { int ret; PyErr_TDB_RAISE_IF_CLOSED(self); ret = tdb_transaction_cancel(self->ctx); PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx); Py_RETURN_NONE; } static PyObject *obj_transaction_commit(PyTdbObject *self, PyObject *Py_UNUSED(ignored)) { int ret; PyErr_TDB_RAISE_IF_CLOSED(self); ret = tdb_transaction_commit(self->ctx); PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx); Py_RETURN_NONE; } static PyObject *obj_transaction_prepare_commit(PyTdbObject *self, PyObject *Py_UNUSED(ignored)) { int ret; PyErr_TDB_RAISE_IF_CLOSED(self); ret = tdb_transaction_prepare_commit(self->ctx); PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx); Py_RETURN_NONE; } static PyObject *obj_transaction_start(PyTdbObject *self, PyObject *Py_UNUSED(ignored)) { int ret; PyErr_TDB_RAISE_IF_CLOSED(self); ret = tdb_transaction_start(self->ctx); PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx); Py_RETURN_NONE; } static PyObject *obj_reopen(PyTdbObject *self, PyObject *Py_UNUSED(ignored)) { int ret; PyErr_TDB_RAISE_IF_CLOSED(self); ret = tdb_reopen(self->ctx); if (ret != 0) { self->closed = true; PyErr_SetObject(PyExc_RuntimeError, Py_BuildValue("(i,s)", TDB_ERR_IO, "Failed to reopen database")); return NULL; } Py_RETURN_NONE; } static PyObject *obj_lockall(PyTdbObject *self, PyObject *Py_UNUSED(ignored)) { int ret; PyErr_TDB_RAISE_IF_CLOSED(self); ret = tdb_lockall(self->ctx); PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx); Py_RETURN_NONE; } static PyObject *obj_unlockall(PyTdbObject *self, PyObject *Py_UNUSED(ignored)) { int ret; PyErr_TDB_RAISE_IF_CLOSED(self); ret = tdb_unlockall(self->ctx); PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx); Py_RETURN_NONE; } static PyObject *obj_lockall_read(PyTdbObject *self, PyObject *Py_UNUSED(ignored)) { int ret; PyErr_TDB_RAISE_IF_CLOSED(self); ret = tdb_lockall_read(self->ctx); PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx); Py_RETURN_NONE; } static PyObject *obj_unlockall_read(PyTdbObject *self, PyObject *Py_UNUSED(ignored)) { int ret = tdb_unlockall_read(self->ctx); PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx); Py_RETURN_NONE; } static PyObject *obj_close(PyTdbObject *self, PyObject *Py_UNUSED(ignored)) { int ret; if (self->closed) Py_RETURN_NONE; ret = tdb_close(self->ctx); self->closed = true; if (ret != 0) { PyErr_SetObject(PyExc_RuntimeError, Py_BuildValue("(i,s)", TDB_ERR_IO, "Failed to close database")); return NULL; } Py_RETURN_NONE; } static PyObject *obj_get(PyTdbObject *self, PyObject *args) { TDB_DATA key; PyObject *py_key; PyErr_TDB_RAISE_IF_CLOSED(self); if (!PyArg_ParseTuple(args, "O", &py_key)) return NULL; key = PyBytes_AsTDB_DATA(py_key); if (!key.dptr) return NULL; return PyBytes_FromTDB_DATA(tdb_fetch(self->ctx, key)); } static PyObject *obj_append(PyTdbObject *self, PyObject *args) { TDB_DATA key, data; PyObject *py_key, *py_data; int ret; PyErr_TDB_RAISE_IF_CLOSED(self); if (!PyArg_ParseTuple(args, "OO", &py_key, &py_data)) return NULL; key = PyBytes_AsTDB_DATA(py_key); if (!key.dptr) return NULL; data = PyBytes_AsTDB_DATA(py_data); if (!data.dptr) return NULL; ret = tdb_append(self->ctx, key, data); PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx); Py_RETURN_NONE; } static PyObject *obj_firstkey(PyTdbObject *self, PyObject *Py_UNUSED(ignored)) { PyErr_TDB_RAISE_IF_CLOSED(self); return PyBytes_FromTDB_DATA(tdb_firstkey(self->ctx)); } static PyObject *obj_nextkey(PyTdbObject *self, PyObject *args) { TDB_DATA key; PyObject *py_key; 
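	/* The key bytes are borrowed from py_key; tdb_nextkey() returns a
	 * fresh allocation which PyBytes_FromTDB_DATA() frees after copying. */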
PyErr_TDB_RAISE_IF_CLOSED(self); if (!PyArg_ParseTuple(args, "O", &py_key)) return NULL; key = PyBytes_AsTDB_DATA(py_key); if (!key.dptr) return NULL; return PyBytes_FromTDB_DATA(tdb_nextkey(self->ctx, key)); } static PyObject *obj_delete(PyTdbObject *self, PyObject *args) { TDB_DATA key; PyObject *py_key; int ret; PyErr_TDB_RAISE_IF_CLOSED(self); if (!PyArg_ParseTuple(args, "O", &py_key)) return NULL; key = PyBytes_AsTDB_DATA(py_key); if (!key.dptr) return NULL; ret = tdb_delete(self->ctx, key); PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx); Py_RETURN_NONE; } static int obj_contains(PyTdbObject *self, PyObject *py_key) { TDB_DATA key; int ret; PyErr_TDB_RAISE_RETURN_MINUS_1_IF_CLOSED(self); key = PyBytes_AsTDB_DATA(py_key); if (!key.dptr) { PyErr_BadArgument(); return -1; } ret = tdb_exists(self->ctx, key); if (ret) return 1; return 0; } #if PY_MAJOR_VERSION < 3 static PyObject *obj_has_key(PyTdbObject *self, PyObject *args) { int ret; PyObject *py_key; PyErr_TDB_RAISE_IF_CLOSED(self); if (!PyArg_ParseTuple(args, "O", &py_key)) return NULL; ret = obj_contains(self, py_key); if (ret == -1) return NULL; if (ret) Py_RETURN_TRUE; Py_RETURN_FALSE; } #endif static PyObject *obj_store(PyTdbObject *self, PyObject *args) { TDB_DATA key, value; int ret; int flag = TDB_REPLACE; PyObject *py_key, *py_value; PyErr_TDB_RAISE_IF_CLOSED(self); if (!PyArg_ParseTuple(args, "OO|i", &py_key, &py_value, &flag)) return NULL; key = PyBytes_AsTDB_DATA(py_key); if (!key.dptr) return NULL; value = PyBytes_AsTDB_DATA(py_value); if (!value.dptr) return NULL; ret = tdb_store(self->ctx, key, value, flag); PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx); Py_RETURN_NONE; } static PyObject *obj_add_flags(PyTdbObject *self, PyObject *args) { unsigned flags; PyErr_TDB_RAISE_IF_CLOSED(self); if (!PyArg_ParseTuple(args, "I", &flags)) return NULL; tdb_add_flags(self->ctx, flags); Py_RETURN_NONE; } static PyObject *obj_remove_flags(PyTdbObject *self, PyObject *args) { unsigned flags; PyErr_TDB_RAISE_IF_CLOSED(self); if (!PyArg_ParseTuple(args, "I", &flags)) return NULL; tdb_remove_flags(self->ctx, flags); Py_RETURN_NONE; } typedef struct { PyObject_HEAD TDB_DATA current; PyTdbObject *iteratee; } PyTdbIteratorObject; static PyObject *tdb_iter_next(PyTdbIteratorObject *self) { TDB_DATA current; PyObject *ret; if (self->current.dptr == NULL && self->current.dsize == 0) return NULL; current = self->current; self->current = tdb_nextkey(self->iteratee->ctx, self->current); ret = PyBytes_FromTDB_DATA(current); return ret; } static void tdb_iter_dealloc(PyTdbIteratorObject *self) { Py_DECREF(self->iteratee); PyObject_Del(self); } PyTypeObject PyTdbIterator = { .tp_name = "Iterator", .tp_basicsize = sizeof(PyTdbIteratorObject), .tp_iternext = (iternextfunc)tdb_iter_next, .tp_dealloc = (destructor)tdb_iter_dealloc, .tp_flags = Py_TPFLAGS_DEFAULT, .tp_iter = PyObject_SelfIter, }; static PyObject *tdb_object_iter(PyTdbObject *self, PyObject *Py_UNUSED(ignored)) { PyTdbIteratorObject *ret; PyErr_TDB_RAISE_IF_CLOSED(self); ret = PyObject_New(PyTdbIteratorObject, &PyTdbIterator); if (!ret) return NULL; ret->current = tdb_firstkey(self->ctx); ret->iteratee = self; Py_INCREF(self); return (PyObject *)ret; } static PyObject *obj_clear(PyTdbObject *self, PyObject *Py_UNUSED(ignored)) { int ret; PyErr_TDB_RAISE_IF_CLOSED(self); ret = tdb_wipe_all(self->ctx); PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx); Py_RETURN_NONE; } static PyObject *obj_repack(PyTdbObject *self, PyObject *Py_UNUSED(ignored)) { int ret; PyErr_TDB_RAISE_IF_CLOSED(self); ret = 
tdb_repack(self->ctx); PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx); Py_RETURN_NONE; } static PyObject *obj_enable_seqnum(PyTdbObject *self, PyObject *Py_UNUSED(ignored)) { PyErr_TDB_RAISE_IF_CLOSED(self); tdb_enable_seqnum(self->ctx); Py_RETURN_NONE; } static PyObject *obj_increment_seqnum_nonblock(PyTdbObject *self, PyObject *Py_UNUSED(ignored)) { PyErr_TDB_RAISE_IF_CLOSED(self); tdb_increment_seqnum_nonblock(self->ctx); Py_RETURN_NONE; } static PyMethodDef tdb_object_methods[] = { { "transaction_cancel", (PyCFunction)obj_transaction_cancel, METH_NOARGS, "S.transaction_cancel() -> None\n" "Cancel the currently active transaction." }, { "transaction_commit", (PyCFunction)obj_transaction_commit, METH_NOARGS, "S.transaction_commit() -> None\n" "Commit the currently active transaction." }, { "transaction_prepare_commit", (PyCFunction)obj_transaction_prepare_commit, METH_NOARGS, "S.transaction_prepare_commit() -> None\n" "Prepare to commit the currently active transaction" }, { "transaction_start", (PyCFunction)obj_transaction_start, METH_NOARGS, "S.transaction_start() -> None\n" "Start a new transaction." }, { "reopen", (PyCFunction)obj_reopen, METH_NOARGS, "Reopen this file." }, { "lock_all", (PyCFunction)obj_lockall, METH_NOARGS, NULL }, { "unlock_all", (PyCFunction)obj_unlockall, METH_NOARGS, NULL }, { "read_lock_all", (PyCFunction)obj_lockall_read, METH_NOARGS, NULL }, { "read_unlock_all", (PyCFunction)obj_unlockall_read, METH_NOARGS, NULL }, { "close", (PyCFunction)obj_close, METH_NOARGS, NULL }, { "get", (PyCFunction)obj_get, METH_VARARGS, "S.get(key) -> value\n" "Fetch a value." }, { "append", (PyCFunction)obj_append, METH_VARARGS, "S.append(key, value) -> None\n" "Append data to an existing key." }, { "firstkey", (PyCFunction)obj_firstkey, METH_NOARGS, "S.firstkey() -> data\n" "Return the first key in this database." }, { "nextkey", (PyCFunction)obj_nextkey, METH_VARARGS, "S.nextkey(key) -> data\n" "Return the next key in this database." }, { "delete", (PyCFunction)obj_delete, METH_VARARGS, "S.delete(key) -> None\n" "Delete an entry." }, #if PY_MAJOR_VERSION < 3 { "has_key", (PyCFunction)obj_has_key, METH_VARARGS, "S.has_key(key) -> None\n" "Check whether key exists in this database." }, #endif { "store", (PyCFunction)obj_store, METH_VARARGS, "S.store(key, data, flag=REPLACE) -> None" "Store data." }, { "add_flags", (PyCFunction)obj_add_flags, METH_VARARGS, "S.add_flags(flags) -> None" }, { "remove_flags", (PyCFunction)obj_remove_flags, METH_VARARGS, "S.remove_flags(flags) -> None" }, #if PY_MAJOR_VERSION >= 3 { "keys", (PyCFunction)tdb_object_iter, METH_NOARGS, "S.iterkeys() -> iterator" }, #else { "iterkeys", (PyCFunction)tdb_object_iter, METH_NOARGS, "S.iterkeys() -> iterator" }, #endif { "clear", (PyCFunction)obj_clear, METH_NOARGS, "S.clear() -> None\n" "Wipe the entire database." }, { "repack", (PyCFunction)obj_repack, METH_NOARGS, "S.repack() -> None\n" "Repack the entire database." 
}, { "enable_seqnum", (PyCFunction)obj_enable_seqnum, METH_NOARGS, "S.enable_seqnum() -> None" }, { "increment_seqnum_nonblock", (PyCFunction)obj_increment_seqnum_nonblock, METH_NOARGS, "S.increment_seqnum_nonblock() -> None" }, { NULL } }; static PyObject *obj_get_hash_size(PyTdbObject *self, void *closure) { PyErr_TDB_RAISE_IF_CLOSED(self); return PyInt_FromLong(tdb_hash_size(self->ctx)); } static int obj_set_max_dead(PyTdbObject *self, PyObject *max_dead, void *closure) { PyErr_TDB_RAISE_RETURN_MINUS_1_IF_CLOSED(self); if (!PyInt_Check(max_dead)) return -1; tdb_set_max_dead(self->ctx, PyInt_AsLong(max_dead)); return 0; } static PyObject *obj_get_map_size(PyTdbObject *self, void *closure) { PyErr_TDB_RAISE_IF_CLOSED(self); return PyInt_FromLong(tdb_map_size(self->ctx)); } static PyObject *obj_get_freelist_size(PyTdbObject *self, void *closure) { PyErr_TDB_RAISE_IF_CLOSED(self); return PyInt_FromLong(tdb_freelist_size(self->ctx)); } static PyObject *obj_get_flags(PyTdbObject *self, void *closure) { PyErr_TDB_RAISE_IF_CLOSED(self); return PyInt_FromLong(tdb_get_flags(self->ctx)); } static PyObject *obj_get_filename(PyTdbObject *self, void *closure) { PyErr_TDB_RAISE_IF_CLOSED(self); return PyBytes_FromString(tdb_name(self->ctx)); } static PyObject *obj_get_seqnum(PyTdbObject *self, void *closure) { PyErr_TDB_RAISE_IF_CLOSED(self); return PyInt_FromLong(tdb_get_seqnum(self->ctx)); } static PyObject *obj_get_text(PyTdbObject *self, void *closure) { PyObject *mod, *cls, *inst; mod = PyImport_ImportModule("_tdb_text"); if (mod == NULL) return NULL; cls = PyObject_GetAttrString(mod, "TdbTextWrapper"); if (cls == NULL) { Py_DECREF(mod); return NULL; } inst = PyObject_CallFunction(cls, discard_const_p(char, "O"), self); Py_DECREF(mod); Py_DECREF(cls); return inst; } static PyGetSetDef tdb_object_getsetters[] = { { .name = discard_const_p(char, "hash_size"), .get = (getter)obj_get_hash_size, }, { .name = discard_const_p(char, "map_size"), .get = (getter)obj_get_map_size, }, { .name = discard_const_p(char, "freelist_size"), .get = (getter)obj_get_freelist_size, }, { .name = discard_const_p(char, "flags"), .get = (getter)obj_get_flags, }, { .name = discard_const_p(char, "max_dead"), .set = (setter)obj_set_max_dead, }, { .name = discard_const_p(char, "filename"), .get = (getter)obj_get_filename, .doc = discard_const_p(char, "The filename of this TDB file."), }, { .name = discard_const_p(char, "seqnum"), .get = (getter)obj_get_seqnum, }, { .name = discard_const_p(char, "text"), .get = (getter)obj_get_text, }, { .name = NULL } }; static PyObject *tdb_object_repr(PyTdbObject *self) { PyErr_TDB_RAISE_IF_CLOSED(self); if (tdb_get_flags(self->ctx) & TDB_INTERNAL) { return PyUnicode_FromString("Tdb()"); } else { return PyUnicode_FromFormat("Tdb('%s')", tdb_name(self->ctx)); } } static void tdb_object_dealloc(PyTdbObject *self) { if (!self->closed) tdb_close(self->ctx); Py_TYPE(self)->tp_free(self); } static PyObject *obj_getitem(PyTdbObject *self, PyObject *key) { TDB_DATA tkey, val; PyErr_TDB_RAISE_IF_CLOSED(self); if (!PyBytes_Check(key)) { PyErr_SetString(PyExc_TypeError, "Expected bytestring as key"); return NULL; } tkey.dptr = (unsigned char *)PyBytes_AsString(key); tkey.dsize = PyBytes_Size(key); val = tdb_fetch(self->ctx, tkey); if (val.dptr == NULL) { /* * if the key doesn't exist raise KeyError(key) to be * consistent with python dict */ PyErr_SetObject(PyExc_KeyError, key); return NULL; } else { return PyBytes_FromTDB_DATA(val); } } static int obj_setitem(PyTdbObject *self, PyObject *key, PyObject 
*value) { TDB_DATA tkey, tval; int ret; PyErr_TDB_RAISE_RETURN_MINUS_1_IF_CLOSED(self); if (!PyBytes_Check(key)) { PyErr_SetString(PyExc_TypeError, "Expected bytestring as key"); return -1; } tkey = PyBytes_AsTDB_DATA(key); if (value == NULL) { ret = tdb_delete(self->ctx, tkey); } else { if (!PyBytes_Check(value)) { PyErr_SetString(PyExc_TypeError, "Expected string as value"); return -1; } tval = PyBytes_AsTDB_DATA(value); ret = tdb_store(self->ctx, tkey, tval, TDB_REPLACE); } if (ret != 0) { PyErr_SetTDBError(self->ctx); return -1; } return ret; } static PyMappingMethods tdb_object_mapping = { .mp_subscript = (binaryfunc)obj_getitem, .mp_ass_subscript = (objobjargproc)obj_setitem, }; static PySequenceMethods tdb_object_seq = { .sq_contains = (objobjproc)obj_contains, }; static PyTypeObject PyTdb = { .tp_name = "tdb.Tdb", .tp_basicsize = sizeof(PyTdbObject), .tp_methods = tdb_object_methods, .tp_getset = tdb_object_getsetters, .tp_new = py_tdb_open, .tp_doc = "A TDB file", .tp_repr = (reprfunc)tdb_object_repr, .tp_dealloc = (destructor)tdb_object_dealloc, .tp_as_mapping = &tdb_object_mapping, .tp_as_sequence = &tdb_object_seq, .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_ITER, .tp_iter = PY_DISCARD_FUNC_SIG(getiterfunc,tdb_object_iter), }; static PyMethodDef tdb_methods[] = { { .ml_name = "open", .ml_meth = PY_DISCARD_FUNC_SIG(PyCFunction, py_tdb_open), .ml_flags = METH_VARARGS|METH_KEYWORDS, .ml_doc = "open(name, hash_size=0, tdb_flags=TDB_DEFAULT, " "flags=O_RDWR, mode=0600)\nOpen a TDB file." }, { .ml_name = NULL } }; #define MODULE_DOC "simple key-value database that supports multiple writers." #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, .m_name = "tdb", .m_doc = MODULE_DOC, .m_size = -1, .m_methods = tdb_methods, }; #endif PyObject* module_init(void); PyObject* module_init(void) { PyObject *m; if (PyType_Ready(&PyTdb) < 0) return NULL; if (PyType_Ready(&PyTdbIterator) < 0) return NULL; #if PY_MAJOR_VERSION >= 3 m = PyModule_Create(&moduledef); #else m = Py_InitModule3("tdb", tdb_methods, MODULE_DOC); #endif if (m == NULL) return NULL; PyModule_AddIntConstant(m, "REPLACE", TDB_REPLACE); PyModule_AddIntConstant(m, "INSERT", TDB_INSERT); PyModule_AddIntConstant(m, "MODIFY", TDB_MODIFY); PyModule_AddIntConstant(m, "DEFAULT", TDB_DEFAULT); PyModule_AddIntConstant(m, "CLEAR_IF_FIRST", TDB_CLEAR_IF_FIRST); PyModule_AddIntConstant(m, "INTERNAL", TDB_INTERNAL); PyModule_AddIntConstant(m, "NOLOCK", TDB_NOLOCK); PyModule_AddIntConstant(m, "NOMMAP", TDB_NOMMAP); PyModule_AddIntConstant(m, "CONVERT", TDB_CONVERT); PyModule_AddIntConstant(m, "BIGENDIAN", TDB_BIGENDIAN); PyModule_AddIntConstant(m, "NOSYNC", TDB_NOSYNC); PyModule_AddIntConstant(m, "SEQNUM", TDB_SEQNUM); PyModule_AddIntConstant(m, "VOLATILE", TDB_VOLATILE); PyModule_AddIntConstant(m, "ALLOW_NESTING", TDB_ALLOW_NESTING); PyModule_AddIntConstant(m, "DISALLOW_NESTING", TDB_DISALLOW_NESTING); PyModule_AddIntConstant(m, "INCOMPATIBLE_HASH", TDB_INCOMPATIBLE_HASH); PyModule_AddStringConstant(m, "__docformat__", "restructuredText"); PyModule_AddStringConstant(m, "__version__", PACKAGE_VERSION); Py_INCREF(&PyTdb); PyModule_AddObject(m, "Tdb", (PyObject *)&PyTdb); Py_INCREF(&PyTdbIterator); return m; } #if PY_MAJOR_VERSION >= 3 PyMODINIT_FUNC PyInit_tdb(void); PyMODINIT_FUNC PyInit_tdb(void) { return module_init(); } #else void inittdb(void); void inittdb(void) { module_init(); } #endif tdb-1.4.2/python/tdbdump.py0000660000000000000000000000051413444661620015563 0ustar 
rootroot00000000000000#!/usr/bin/env python3 # Trivial reimplementation of tdbdump in Python from __future__ import print_function import tdb, sys if len(sys.argv) < 2: print("Usage: tdbdump.py ") sys.exit(1) db = tdb.Tdb(sys.argv[1]) for (k, v) in db.items(): print("{\nkey(%d) = %r\ndata(%d) = %r\n}" % (len(k), k, len(v), v)) tdb-1.4.2/python/tests/simple.py0000660000000000000000000002173313444661620016565 0ustar rootroot00000000000000#!/usr/bin/env python3 # Some simple tests for the Python bindings for TDB # Note that this tests the interface of the Python bindings # It does not test tdb itself. # # Copyright (C) 2007-2008 Jelmer Vernooij # Published under the GNU LGPLv3 or later import sys import os import tempfile from unittest import TestCase import tdb class OpenTdbTests(TestCase): def test_nonexistent_read(self): self.assertRaises(IOError, tdb.Tdb, "/some/nonexistent/file", 0, tdb.DEFAULT, os.O_RDWR) class CloseTdbTests(TestCase): def test_double_close(self): self.tdb = tdb.Tdb(tempfile.mkstemp()[1], 0, tdb.DEFAULT, os.O_CREAT|os.O_RDWR) self.assertNotEqual(None, self.tdb) # ensure that double close does not crash python self.tdb.close() self.tdb.close() # Check that further operations do not crash python self.assertRaises(RuntimeError, lambda: self.tdb.transaction_start()) self.assertRaises(RuntimeError, lambda: self.tdb["bar"]) class InternalTdbTests(TestCase): def test_repr(self): self.tdb = tdb.Tdb() # repr used to crash on internal db self.assertEqual(repr(self.tdb), "Tdb()") class CommonTdbTests(TestCase): """Tests common to both the text & bytes interfaces""" use_text = False def setUp(self): super(CommonTdbTests, self).setUp() self.tdb = tdb.Tdb(tempfile.mkstemp()[1], 0, tdb.DEFAULT, os.O_CREAT|os.O_RDWR) self.assertNotEqual(None, self.tdb) if self.use_text: self.tdb = self.tdb.text def test_lockall(self): self.tdb.lock_all() def test_max_dead(self): self.tdb.max_dead = 20 def test_unlockall(self): self.tdb.lock_all() self.tdb.unlock_all() def test_lockall_read(self): self.tdb.read_lock_all() self.tdb.read_unlock_all() def test_reopen(self): self.tdb.reopen() def test_hash_size(self): self.tdb.hash_size def test_map_size(self): self.tdb.map_size def test_freelist_size(self): self.tdb.freelist_size def test_name(self): self.tdb.filename def test_add_flags(self): self.tdb.add_flags(tdb.NOMMAP) self.tdb.remove_flags(tdb.NOMMAP) class TextCommonTdbTests(CommonTdbTests): use_text = True class SimpleTdbTests(TestCase): def setUp(self): super(SimpleTdbTests, self).setUp() self.tdb = tdb.Tdb(tempfile.mkstemp()[1], 0, tdb.DEFAULT, os.O_CREAT|os.O_RDWR) self.assertNotEqual(None, self.tdb) def test_repr(self): self.assertTrue(repr(self.tdb).startswith("Tdb('")) def test_store(self): self.tdb.store(b"bar", b"bla") self.assertEqual(b"bla", self.tdb.get(b"bar")) def test_getitem(self): self.tdb[b"bar"] = b"foo" self.tdb.reopen() self.assertEqual(b"foo", self.tdb[b"bar"]) def test_delete(self): self.tdb[b"bar"] = b"foo" del self.tdb[b"bar"] self.assertRaises(KeyError, lambda: self.tdb[b"bar"]) def test_contains(self): self.tdb[b"bla"] = b"bloe" self.assertTrue(b"bla" in self.tdb) self.assertFalse(b"qwertyuiop" in self.tdb) if sys.version_info < (3, 0): self.assertTrue(self.tdb.has_key(b"bla")) self.assertFalse(self.tdb.has_key(b"qwertyuiop")) def test_keyerror(self): self.assertRaises(KeyError, lambda: self.tdb[b"bla"]) def test_iterator(self): self.tdb[b"bla"] = b"1" self.tdb[b"brainslug"] = b"2" l = list(self.tdb) l.sort() self.assertEqual([b"bla", b"brainslug"], l) def 
test_transaction_cancel(self): self.tdb[b"bloe"] = b"2" self.tdb.transaction_start() self.tdb[b"bloe"] = b"1" self.tdb.transaction_cancel() self.assertEqual(b"2", self.tdb[b"bloe"]) def test_transaction_commit(self): self.tdb[b"bloe"] = b"2" self.tdb.transaction_start() self.tdb[b"bloe"] = b"1" self.tdb.transaction_commit() self.assertEqual(b"1", self.tdb[b"bloe"]) def test_transaction_prepare_commit(self): self.tdb[b"bloe"] = b"2" self.tdb.transaction_start() self.tdb[b"bloe"] = b"1" self.tdb.transaction_prepare_commit() self.tdb.transaction_commit() self.assertEqual(b"1", self.tdb[b"bloe"]) def test_iterkeys(self): self.tdb[b"bloe"] = b"2" self.tdb[b"bla"] = b"25" if sys.version_info >= (3, 0): i = self.tdb.keys() else: i = self.tdb.iterkeys() self.assertEqual(set([b"bloe", b"bla"]), set([next(i), next(i)])) def test_clear(self): self.tdb[b"bloe"] = b"2" self.tdb[b"bla"] = b"25" self.assertEqual(2, len(list(self.tdb))) self.tdb.clear() self.assertEqual(0, len(list(self.tdb))) def test_repack(self): self.tdb[b"foo"] = b"abc" self.tdb[b"bar"] = b"def" del self.tdb[b"foo"] self.tdb.repack() def test_seqnum(self): self.tdb.enable_seqnum() seq1 = self.tdb.seqnum self.tdb.increment_seqnum_nonblock() seq2 = self.tdb.seqnum self.assertEqual(seq2-seq1, 1) def test_len(self): self.assertEqual(0, len(list(self.tdb))) self.tdb[b"entry"] = b"value" self.assertEqual(1, len(list(self.tdb))) class TdbTextTests(TestCase): def setUp(self): super(TdbTextTests, self).setUp() self.tdb = tdb.Tdb(tempfile.mkstemp()[1], 0, tdb.DEFAULT, os.O_CREAT|os.O_RDWR) self.assertNotEqual(None, self.tdb) def test_repr(self): self.assertTrue(repr(self.tdb).startswith("Tdb('")) def test_store(self): self.tdb.text.store("bar", "bla") self.assertEqual("bla", self.tdb.text.get("bar")) def test_getitem(self): self.tdb.text["bar"] = "foo" self.tdb.reopen() self.assertEqual("foo", self.tdb.text["bar"]) def test_delete(self): self.tdb.text["bar"] = "foo" del self.tdb.text["bar"] self.assertRaises(KeyError, lambda: self.tdb.text["bar"]) def test_contains(self): self.tdb.text["bla"] = "bloe" self.assertTrue("bla" in self.tdb.text) self.assertFalse("qwertyuiop" in self.tdb.text) if sys.version_info < (3, 0): self.assertTrue(self.tdb.text.has_key("bla")) self.assertFalse(self.tdb.text.has_key("qwertyuiop")) def test_keyerror(self): self.assertRaises(KeyError, lambda: self.tdb.text["bla"]) def test_iterator(self): self.tdb.text["bla"] = "1" self.tdb.text["brainslug"] = "2" l = list(self.tdb.text) l.sort() self.assertEqual(["bla", "brainslug"], l) def test_transaction_cancel(self): self.tdb.text["bloe"] = "2" self.tdb.transaction_start() self.tdb.text["bloe"] = "1" self.tdb.transaction_cancel() self.assertEqual("2", self.tdb.text["bloe"]) def test_transaction_commit(self): self.tdb.text["bloe"] = "2" self.tdb.transaction_start() self.tdb.text["bloe"] = "1" self.tdb.transaction_commit() self.assertEqual("1", self.tdb.text["bloe"]) def test_transaction_prepare_commit(self): self.tdb.text["bloe"] = "2" self.tdb.transaction_start() self.tdb.text["bloe"] = "1" self.tdb.transaction_prepare_commit() self.tdb.transaction_commit() self.assertEqual("1", self.tdb.text["bloe"]) def test_iterkeys(self): self.tdb.text["bloe"] = "2" self.tdb.text["bla"] = "25" if sys.version_info >= (3, 0): i = self.tdb.text.keys() else: i = self.tdb.text.iterkeys() self.assertEqual(set(["bloe", "bla"]), set([next(i), next(i)])) def test_clear(self): self.tdb.text["bloe"] = "2" self.tdb.text["bla"] = "25" self.assertEqual(2, len(list(self.tdb))) self.tdb.clear() 
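        # After clear() wipes the database, iteration must yield nothing.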
self.assertEqual(0, len(list(self.tdb))) def test_repack(self): self.tdb.text["foo"] = "abc" self.tdb.text["bar"] = "def" del self.tdb.text["foo"] self.tdb.repack() def test_len(self): self.assertEqual(0, len(list(self.tdb.text))) self.tdb.text["entry"] = "value" self.assertEqual(1, len(list(self.tdb.text))) def test_text_and_binary(self): text = u'\xfa\u0148\xef\xe7\xf8\xf0\xea' bytestr = text.encode('utf-8') self.tdb[b"entry"] = bytestr self.tdb.text[u"entry2"] = text self.assertEqual(self.tdb.text["entry"], text) self.assertEqual(self.tdb[b"entry2"], bytestr) assert self.tdb.text.raw == self.tdb class VersionTests(TestCase): def test_present(self): self.assertTrue(isinstance(tdb.__version__, str)) if __name__ == '__main__': import unittest unittest.TestProgram() tdb-1.4.2/tdb.pc.in0000660000000000000000000000036012406075657013740 0ustar rootroot00000000000000prefix=@prefix@ exec_prefix=@exec_prefix@ libdir=@libdir@ includedir=@includedir@ Name: tdb Description: A trivial database Version: @PACKAGE_VERSION@ Libs: @LIB_RPATH@ -L${libdir} -ltdb Cflags: -I${includedir} URL: http://tdb.samba.org/ tdb-1.4.2/test/circular_chain.tdb0000660000000000000000000000042013444661620016645 0ustar rootroot00000000000000TDB file m& Ÿ}ö ùþ:ðÐérJ ™&ab °¿׌™&cd Е¬c™&ef tdb-1.4.2/test/circular_freelist.tdb0000660000000000000000000000062013444661620017402 0ustar rootroot00000000000000TDB file m& Ÿ}ö ùþ:PpérJ ™&aa PTÁKfæþÙbb °¿׌™&cc Ð*^ÎfæþÙdd ð•¬c™&ee û)RfæþÙff 0kIð“™&gg tdb-1.4.2/test/external-agent.c0000660000000000000000000001167412702766507016311 0ustar rootroot00000000000000#include "external-agent.h" #include "lock-tracking.h" #include "logging.h" #include #include #include #include #include #include #include #include #include "../common/tdb_private.h" #include "tap-interface.h" #include #include static struct tdb_context *tdb; static enum agent_return do_operation(enum operation op, const char *name) { TDB_DATA k; enum agent_return ret; TDB_DATA data; if (op != OPEN && op != OPEN_WITH_CLEAR_IF_FIRST && !tdb) { diag("external: No tdb open!"); return OTHER_FAILURE; } k.dptr = discard_const_p(uint8_t, name); k.dsize = strlen(name); locking_would_block = 0; switch (op) { case OPEN: if (tdb) { diag("Already have tdb %s open", tdb_name(tdb)); return OTHER_FAILURE; } tdb = tdb_open_ex(name, 0, TDB_DEFAULT, O_RDWR, 0, &taplogctx, NULL); if (!tdb) { if (!locking_would_block) diag("Opening tdb gave %s", strerror(errno)); ret = OTHER_FAILURE; } else ret = SUCCESS; break; case OPEN_WITH_CLEAR_IF_FIRST: if (tdb) return OTHER_FAILURE; tdb = tdb_open_ex(name, 0, TDB_CLEAR_IF_FIRST, O_RDWR, 0, &taplogctx, NULL); ret = tdb ? SUCCESS : OTHER_FAILURE; break; case TRANSACTION_START: ret = tdb_transaction_start(tdb) == 0 ? SUCCESS : OTHER_FAILURE; break; case FETCH: data = tdb_fetch(tdb, k); if (data.dptr == NULL) { if (tdb_error(tdb) == TDB_ERR_NOEXIST) ret = FAILED; else ret = OTHER_FAILURE; } else if (data.dsize != k.dsize || memcmp(data.dptr, k.dptr, k.dsize) != 0) { ret = OTHER_FAILURE; } else { ret = SUCCESS; } free(data.dptr); break; case STORE: ret = tdb_store(tdb, k, k, 0) == 0 ? SUCCESS : OTHER_FAILURE; break; case TRANSACTION_COMMIT: ret = tdb_transaction_commit(tdb)==0 ? SUCCESS : OTHER_FAILURE; break; case CHECK: ret = tdb_check(tdb, NULL, NULL) == 0 ? SUCCESS : OTHER_FAILURE; break; case NEEDS_RECOVERY: ret = tdb_needs_recovery(tdb) ? SUCCESS : FAILED; break; case CLOSE: ret = tdb_close(tdb) == 0 ? 
SUCCESS : OTHER_FAILURE; tdb = NULL; break; case PING: ret = SUCCESS; break; case UNMAP: ret = tdb_munmap(tdb) == 0 ? SUCCESS : OTHER_FAILURE; if (ret == SUCCESS) { tdb->flags |= TDB_NOMMAP; } break; default: ret = OTHER_FAILURE; } if (locking_would_block) ret = WOULD_HAVE_BLOCKED; return ret; } struct agent { int cmdfd, responsefd; pid_t pid; }; /* Do this before doing any tdb stuff. Return handle, or NULL. */ struct agent *prepare_external_agent(void) { int ret; int command[2], response[2]; char name[1+PATH_MAX]; struct agent *agent = malloc(sizeof(*agent)); if (pipe(command) != 0 || pipe(response) != 0) { fprintf(stderr, "pipe failed: %s\n", strerror(errno)); exit(1); } agent->pid = fork(); if (agent->pid < 0) { fprintf(stderr, "fork failed: %s\n", strerror(errno)); exit(1); } if (agent->pid != 0) { close(command[0]); close(response[1]); agent->cmdfd = command[1]; agent->responsefd = response[0]; return agent; } close(command[1]); close(response[0]); /* We want to fail, not block. */ nonblocking_locks = true; log_prefix = "external: "; while ((ret = read(command[0], name, sizeof(name))) > 0) { enum agent_return result; result = do_operation(name[0], name+1); if (write(response[1], &result, sizeof(result)) != sizeof(result)) abort(); } exit(0); } void shutdown_agent(struct agent *agent) { pid_t p; close(agent->cmdfd); close(agent->responsefd); p = waitpid(agent->pid, NULL, WNOHANG); if (p == 0) { kill(agent->pid, SIGKILL); } waitpid(agent->pid, NULL, 0); free(agent); } /* Ask the external agent to try to do an operation. */ enum agent_return external_agent_operation(struct agent *agent, enum operation op, const char *name) { enum agent_return res; unsigned int len; char *string; if (!name) name = ""; len = 1 + strlen(name) + 1; string = malloc(len); string[0] = op; strncpy(string+1, name, len - 1); string[len-1] = '\0'; if (write(agent->cmdfd, string, len) != len || read(agent->responsefd, &res, sizeof(res)) != sizeof(res)) res = AGENT_DIED; free(string); return res; } const char *agent_return_name(enum agent_return ret) { return ret == SUCCESS ? "SUCCESS" : ret == WOULD_HAVE_BLOCKED ? "WOULD_HAVE_BLOCKED" : ret == AGENT_DIED ? "AGENT_DIED" : ret == FAILED ? "FAILED" : ret == OTHER_FAILURE ? "OTHER_FAILURE" : "**INVALID**"; } const char *operation_name(enum operation op) { switch (op) { case OPEN: return "OPEN"; case OPEN_WITH_CLEAR_IF_FIRST: return "OPEN_WITH_CLEAR_IF_FIRST"; case TRANSACTION_START: return "TRANSACTION_START"; case FETCH: return "FETCH"; case STORE: return "STORE"; case TRANSACTION_COMMIT: return "TRANSACTION_COMMIT"; case CHECK: return "CHECK"; case NEEDS_RECOVERY: return "NEEDS_RECOVERY"; case CLOSE: return "CLOSE"; case PING: return "PING"; case UNMAP: return "UNMAP"; } return "**INVALID**"; } tdb-1.4.2/test/external-agent.h0000660000000000000000000000204712406075657016310 0ustar rootroot00000000000000#ifndef TDB_TEST_EXTERNAL_AGENT_H #define TDB_TEST_EXTERNAL_AGENT_H /* For locking tests, we need a different process to try things at * various times. */ enum operation { OPEN, OPEN_WITH_CLEAR_IF_FIRST, TRANSACTION_START, FETCH, STORE, TRANSACTION_COMMIT, CHECK, NEEDS_RECOVERY, CLOSE, PING, UNMAP, }; /* Do this before doing any tdb stuff. Return handle, or -1. */ struct agent *prepare_external_agent(void); void shutdown_agent(struct agent *agent); enum agent_return { SUCCESS, WOULD_HAVE_BLOCKED, AGENT_DIED, FAILED, /* For fetch, or NEEDS_RECOVERY */ OTHER_FAILURE, }; /* Ask the external agent to try to do an operation. 
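 * (the request is written to the command pipe as one opcode byte followed
 * by the NUL-terminated name)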
* name == tdb name for OPEN/OPEN_WITH_CLEAR_IF_FIRST, * record name for FETCH/STORE (store stores name as data too) */ enum agent_return external_agent_operation(struct agent *handle, enum operation op, const char *name); /* Mapping enum -> string. */ const char *agent_return_name(enum agent_return ret); const char *operation_name(enum operation op); #endif /* TDB_TEST_EXTERNAL_AGENT_H */ tdb-1.4.2/test/jenkins-be-hash.tdb0000660000000000000000000000127012406075657016657 0ustar rootroot00000000000000TDB file &mƒ×¶”å<ƒtdb-1.4.2/test/jenkins-le-hash.tdb0000660000000000000000000000127012406075657016671 0ustar rootroot00000000000000TDB file m&ƒå”¶×D¹vtdb-1.4.2/test/lock-tracking.c0000660000000000000000000000666712406075657016131 0ustar rootroot00000000000000/* We save the locks so we can reaquire them. */ #include "../common/tdb_private.h" #include #include #include #include #include "tap-interface.h" #include "lock-tracking.h" struct testlock { struct testlock *next; unsigned int off; unsigned int len; int type; }; static struct testlock *testlocks; int locking_errors = 0; bool suppress_lockcheck = false; bool nonblocking_locks; int locking_would_block = 0; void (*unlock_callback)(int fd); int fcntl_with_lockcheck(int fd, int cmd, ... /* arg */ ) { va_list ap; int ret, arg3; struct flock *fl; bool may_block = false; if (cmd != F_SETLK && cmd != F_SETLKW) { /* This may be totally bogus, but we don't know in general. */ va_start(ap, cmd); arg3 = va_arg(ap, int); va_end(ap); return fcntl(fd, cmd, arg3); } va_start(ap, cmd); fl = va_arg(ap, struct flock *); va_end(ap); if (cmd == F_SETLKW && nonblocking_locks) { cmd = F_SETLK; may_block = true; } ret = fcntl(fd, cmd, fl); /* Detect when we failed, but might have been OK if we waited. */ if (may_block && ret == -1 && (errno == EAGAIN || errno == EACCES)) { locking_would_block++; } if (fl->l_type == F_UNLCK) { struct testlock **l; struct testlock *old = NULL; for (l = &testlocks; *l; l = &(*l)->next) { if ((*l)->off == fl->l_start && (*l)->len == fl->l_len) { if (ret == 0) { old = *l; *l = (*l)->next; free(old); } break; } if (((*l)->off == fl->l_start) && ((*l)->len == 0) && (ret == 0)) { /* * Remove a piece from the start of the * allrecord_lock */ old = *l; (*l)->off += fl->l_len; break; } } if (!old && !suppress_lockcheck) { diag("Unknown unlock %u@%u - %i", (int)fl->l_len, (int)fl->l_start, ret); locking_errors++; } } else { struct testlock *new, *i; unsigned int fl_end = fl->l_start + fl->l_len; if (fl->l_len == 0) fl_end = (unsigned int)-1; /* Check for overlaps: we shouldn't do this. */ for (i = testlocks; i; i = i->next) { unsigned int i_end = i->off + i->len; if (i->len == 0) i_end = (unsigned int)-1; if (fl->l_start >= i->off && fl->l_start < i_end) break; if (fl_end >= i->off && fl_end < i_end) break; /* tdb_allrecord_lock does this, handle adjacent: */ if (fl->l_start == i_end && fl->l_type == i->type) { if (ret == 0) { i->len = fl->l_len ? i->len + fl->l_len : 0; } goto done; } } if (i) { /* Special case: upgrade of allrecord lock. */ if (i->type == F_RDLCK && fl->l_type == F_WRLCK && i->off == FREELIST_TOP && fl->l_start == FREELIST_TOP && i->len == 0 && fl->l_len == 0) { if (ret == 0) i->type = F_WRLCK; goto done; } if (!suppress_lockcheck) { diag("%s testlock %u@%u overlaps %u@%u", fl->l_type == F_WRLCK ? 
"write" : "read", (int)fl->l_len, (int)fl->l_start, i->len, (int)i->off); locking_errors++; } } if (ret == 0) { new = malloc(sizeof *new); new->off = fl->l_start; new->len = fl->l_len; new->type = fl->l_type; new->next = testlocks; testlocks = new; } } done: if (ret == 0 && fl->l_type == F_UNLCK && unlock_callback) unlock_callback(fd); return ret; } unsigned int forget_locking(void) { unsigned int num = 0; while (testlocks) { struct testlock *next = testlocks->next; free(testlocks); testlocks = next; num++; } return num; } tdb-1.4.2/test/lock-tracking.h0000660000000000000000000000125112406075657016116 0ustar rootroot00000000000000#ifndef LOCK_TRACKING_H #define LOCK_TRACKING_H #include /* Set this if you want a callback after fnctl unlock. */ extern void (*unlock_callback)(int fd); /* Replacement fcntl. */ int fcntl_with_lockcheck(int fd, int cmd, ... /* arg */ ); /* Discard locking info: returns number of locks outstanding. */ unsigned int forget_locking(void); /* Number of errors in locking. */ extern int locking_errors; /* Suppress lock checking. */ extern bool suppress_lockcheck; /* Make all locks non-blocking. */ extern bool nonblocking_locks; /* Number of times we failed a lock because we made it non-blocking. */ extern int locking_would_block; #endif /* LOCK_TRACKING_H */ tdb-1.4.2/test/logging.c0000660000000000000000000000131412406075657015007 0ustar rootroot00000000000000#include "logging.h" #include "tap-interface.h" #include #include #include #include bool suppress_logging = false; const char *log_prefix = ""; /* Turn log messages into tap diag messages. */ static void taplog(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...) { va_list ap; char line[200]; if (suppress_logging) return; va_start(ap, fmt); vsprintf(line, fmt, ap); va_end(ap); /* Strip trailing \n: diag adds it. 
*/ if (line[0] && line[strlen(line)-1] == '\n') diag("%s%.*s", log_prefix, (unsigned)strlen(line)-1, line); else diag("%s%s", log_prefix, line); } struct tdb_logging_context taplogctx = { taplog, NULL }; tdb-1.4.2/test/logging.h0000660000000000000000000000041012406075657015010 0ustar rootroot00000000000000#ifndef TDB_TEST_LOGGING_H #define TDB_TEST_LOGGING_H #include "replace.h" #include "../include/tdb.h" #include extern bool suppress_logging; extern const char *log_prefix; extern struct tdb_logging_context taplogctx; #endif /* TDB_TEST_LOGGING_H */ tdb-1.4.2/test/old-nohash-be.tdb0000660000000000000000000000127012406075657016331 0ustar rootroot00000000000000TDB file &mƒtdb-1.4.2/test/old-nohash-le.tdb0000660000000000000000000000127012406075657016343 0ustar rootroot00000000000000TDB file m&ƒtdb-1.4.2/test/run-3G-file.c0000660000000000000000000000740413527011454015345 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include "logging.h" static int tdb_expand_file_sparse(struct tdb_context *tdb, tdb_off_t size, tdb_off_t addition) { if (tdb->read_only || tdb->traverse_read) { tdb->ecode = TDB_ERR_RDONLY; return -1; } if (tdb_ftruncate(tdb, size+addition) == -1) { char b = 0; ssize_t written = tdb_pwrite(tdb, &b, 1, (size+addition) - 1); if (written == 0) { /* try once more, potentially revealing errno */ written = tdb_pwrite(tdb, &b, 1, (size+addition) - 1); } if (written == 0) { /* again - give up, guessing errno */ errno = ENOSPC; } if (written != 1) { TDB_LOG((tdb, TDB_DEBUG_FATAL, "expand_file to %d failed (%s)\n", size+addition, strerror(errno))); return -1; } } return 0; } static const struct tdb_methods large_io_methods = { tdb_read, tdb_write, tdb_next_hash_chain, tdb_notrans_oob, tdb_expand_file_sparse }; static int test_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data, void *_data) { TDB_DATA *expect = _data; ok1(key.dsize == strlen("hi")); ok1(memcmp(key.dptr, "hi", strlen("hi")) == 0); ok1(data.dsize == expect->dsize); ok1(memcmp(data.dptr, expect->dptr, data.dsize) == 0); return 0; } int main(int argc, char *argv[]) { struct tdb_context *tdb; TDB_DATA key, orig_data, data; uint32_t hashval; tdb_off_t rec_ptr; struct tdb_record rec; int ret; plan_tests(24); tdb = tdb_open_ex("run-36-file.tdb", 1024, TDB_CLEAR_IF_FIRST, O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL); ok1(tdb); tdb->methods = &large_io_methods; key.dsize = strlen("hi"); key.dptr = discard_const_p(uint8_t, "hi"); orig_data.dsize = strlen("world"); orig_data.dptr = discard_const_p(uint8_t, "world"); /* Enlarge the file (internally multiplies by 2). */ ret = tdb_expand(tdb, 1500000000); #ifdef HAVE_INCOHERENT_MMAP /* This can fail due to mmap failure on 32 bit systems. */ if (ret == -1) { /* These should now fail. */ ok1(tdb_store(tdb, key, orig_data, TDB_INSERT) == -1); data = tdb_fetch(tdb, key); ok1(data.dptr == NULL); ok1(tdb_traverse(tdb, test_traverse, &orig_data) == -1); ok1(tdb_delete(tdb, key) == -1); ok1(tdb_traverse(tdb, test_traverse, NULL) == -1); /* Skip the rest... */ for (ret = 0; ret < 24 - 6; ret++) ok1(1); tdb_close(tdb); return exit_status(); } #endif ok1(ret == 0); /* Put an entry in, and check it. 
*/ ok1(tdb_store(tdb, key, orig_data, TDB_INSERT) == 0); data = tdb_fetch(tdb, key); ok1(data.dsize == strlen("world")); ok1(memcmp(data.dptr, "world", strlen("world")) == 0); free(data.dptr); /* That currently fills at the end, make sure that's true. */ hashval = tdb->hash_fn(&key); rec_ptr = tdb_find_lock_hash(tdb, key, hashval, F_RDLCK, &rec); ok1(rec_ptr); ok1(rec_ptr > 2U*1024*1024*1024); tdb_unlock(tdb, BUCKET(rec.full_hash), F_RDLCK); /* Traverse must work. */ ok1(tdb_traverse(tdb, test_traverse, &orig_data) == 1); /* Delete should work. */ ok1(tdb_delete(tdb, key) == 0); ok1(tdb_traverse(tdb, test_traverse, NULL) == 0); /* Transactions should work. */ ok1(tdb_transaction_start(tdb) == 0); ok1(tdb_store(tdb, key, orig_data, TDB_INSERT) == 0); data = tdb_fetch(tdb, key); ok1(data.dsize == strlen("world")); ok1(memcmp(data.dptr, "world", strlen("world")) == 0); free(data.dptr); ok1(tdb_transaction_commit(tdb) == 0); ok1(tdb_traverse(tdb, test_traverse, &orig_data) == 1); tdb_close(tdb); return exit_status(); } tdb-1.4.2/test/run-allrecord-traverse-deadlock.c0000660000000000000000000001220512553526140021517 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include #include #include #include "logging.h" static void do_allrecord_lock(const char *name, int tdb_flags, int up, int down) { struct tdb_context *tdb; int ret; ssize_t nread, nwritten; char c = 0; tdb = tdb_open_ex(name, 3, tdb_flags, O_RDWR|O_CREAT, 0755, &taplogctx, NULL); ok(tdb, "tdb_open_ex should succeed"); ret = tdb_lockall(tdb); ok(ret == 0, "tdb_lockall should succeed"); nwritten = write(up, &c, sizeof(c)); ok(nwritten == sizeof(c), "write should succeed"); nread = read(down, &c, sizeof(c)); ok(nread == sizeof(c), "read should succeed"); ret = tdb_traverse(tdb, NULL, NULL); ok(ret == -1, "do_allrecord_lock: traverse should fail"); nwritten = write(up, &c, sizeof(c)); ok(nwritten == sizeof(c), "write should succeed"); exit(0); } static void do_traverse(const char *name, int tdb_flags, int up, int down) { struct tdb_context *tdb; int ret; ssize_t nread, nwritten; char c = 0; tdb = tdb_open_ex(name, 3, tdb_flags, O_RDWR|O_CREAT, 0755, &taplogctx, NULL); ok(tdb, "tdb_open_ex should succeed"); ret = tdb_traverse(tdb, NULL, NULL); ok(ret == 1, "do_traverse: tdb_traverse should return 1 record"); nwritten = write(up, &c, sizeof(c)); ok(nwritten == sizeof(c), "write should succeed"); nread = read(down, &c, sizeof(c)); ok(nread == sizeof(c), "read should succeed"); exit(0); } /* * Process 1: get the allrecord_lock on a tdb. * Process 2: start a traverse, this will stall waiting for the * first chainlock: That is taken by the allrecord_lock * Process 1: start a traverse: This will get EDEADLK in trying to * get the TRANSACTION_LOCK. It will deadlock for mutexes, * which don't have built-in deadlock detection. 
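*
* A minimal sketch of the fcntl flavour, mirroring do_allrecord_lock()
* above (names and flags as used in this file):
*
*   tdb = tdb_open_ex(name, 3, tdb_flags, O_RDWR|O_CREAT, 0755,
*                     &taplogctx, NULL);
*   ret = tdb_lockall(tdb);              // allrecord lock taken
*   ret = tdb_traverse(tdb, NULL, NULL); // tries TRANSACTION_LOCK
*   ... ret == -1 here: fcntl reports EDEADLK and traverse fails ...
*
* With TDB_MUTEX_LOCKING there is no such kernel detection, hence the
* careful pipe choreography in do_tests() below.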
*/ static int do_tests(const char *name, int tdb_flags) { struct tdb_context *tdb; int ret; pid_t traverse_child, allrecord_child; int traverse_down[2]; int traverse_up[2]; int allrecord_down[2]; int allrecord_up[2]; char c; ssize_t nread, nwritten; TDB_DATA key, data; key.dsize = strlen("hi"); key.dptr = discard_const_p(uint8_t, "hi"); data.dsize = strlen("world"); data.dptr = discard_const_p(uint8_t, "world"); tdb = tdb_open_ex(name, 3, tdb_flags, O_RDWR|O_CREAT, 0755, &taplogctx, NULL); ok(tdb, "tdb_open_ex should succeed"); ret = tdb_store(tdb, key, data, TDB_INSERT); ok(ret == 0, "tdb_store should succeed"); ret = pipe(traverse_down); ok(ret == 0, "pipe should succeed"); ret = pipe(traverse_up); ok(ret == 0, "pipe should succeed"); ret = pipe(allrecord_down); ok(ret == 0, "pipe should succeed"); ret = pipe(allrecord_up); ok(ret == 0, "pipe should succeed"); allrecord_child = fork(); ok(allrecord_child != -1, "fork should succeed"); if (allrecord_child == 0) { tdb_close(tdb); close(traverse_up[0]); close(traverse_up[1]); close(traverse_down[0]); close(traverse_down[1]); close(allrecord_up[0]); close(allrecord_down[1]); do_allrecord_lock(name, tdb_flags, allrecord_up[1], allrecord_down[0]); exit(0); } close(allrecord_up[1]); close(allrecord_down[0]); nread = read(allrecord_up[0], &c, sizeof(c)); ok(nread == sizeof(c), "read should succeed"); traverse_child = fork(); ok(traverse_child != -1, "fork should succeed"); if (traverse_child == 0) { tdb_close(tdb); close(traverse_up[0]); close(traverse_down[1]); close(allrecord_up[0]); close(allrecord_down[1]); do_traverse(name, tdb_flags, traverse_up[1], traverse_down[0]); exit(0); } close(traverse_up[1]); close(traverse_down[0]); poll(NULL, 0, 1000); nwritten = write(allrecord_down[1], &c, sizeof(c)); ok(nwritten == sizeof(c), "write should succeed"); nread = read(traverse_up[0], &c, sizeof(c)); ok(nread == sizeof(c), "read should succeed"); nwritten = write(traverse_down[1], &c, sizeof(c)); ok(nwritten == sizeof(c), "write should succeed"); nread = read(allrecord_up[0], &c, sizeof(c)); ok(nread == sizeof(c), "read should succeed"); close(traverse_up[0]); close(traverse_down[1]); close(allrecord_up[0]); close(allrecord_down[1]); diag("%s tests done", name); return exit_status(); } int main(int argc, char *argv[]) { int ret; bool mutex_support; mutex_support = tdb_runtime_check_for_robust_mutexes(); ret = do_tests("marklock-deadlock-fcntl.tdb", TDB_CLEAR_IF_FIRST | TDB_INCOMPATIBLE_HASH); ok(ret == 0, "marklock-deadlock-fcntl.tdb tests should succeed"); if (!mutex_support) { skip(1, "No robust mutex support, " "skipping marklock-deadlock-mutex.tdb tests"); return exit_status(); } ret = do_tests("marklock-deadlock-mutex.tdb", TDB_CLEAR_IF_FIRST | TDB_MUTEX_LOCKING | TDB_INCOMPATIBLE_HASH); ok(ret == 0, "marklock-deadlock-mutex.tdb tests should succeed"); return exit_status(); } tdb-1.4.2/test/run-bad-tdb-header.c0000660000000000000000000000310412406075657016705 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include "logging.h" int main(int argc, char *argv[]) { struct tdb_context *tdb; struct tdb_header hdr; int fd; plan_tests(11); /* Can open fine if complete crap, as long as
O_CREAT. */ fd = open("run-bad-tdb-header.tdb", O_RDWR|O_CREAT|O_TRUNC, 0600); ok1(fd >= 0); ok1(write(fd, "hello world", 11) == 11); close(fd); tdb = tdb_open_ex("run-bad-tdb-header.tdb", 1024, 0, O_RDWR, 0, &taplogctx, NULL); ok1(!tdb); tdb = tdb_open_ex("run-bad-tdb-header.tdb", 1024, 0, O_CREAT|O_RDWR, 0600, &taplogctx, NULL); ok1(tdb); tdb_close(tdb); /* Now, with wrong version it should *not* overwrite. */ fd = open("run-bad-tdb-header.tdb", O_RDWR); ok1(fd >= 0); ok1(read(fd, &hdr, sizeof(hdr)) == sizeof(hdr)); ok1(hdr.version == TDB_VERSION); hdr.version++; lseek(fd, 0, SEEK_SET); ok1(write(fd, &hdr, sizeof(hdr)) == sizeof(hdr)); close(fd); tdb = tdb_open_ex("run-bad-tdb-header.tdb", 1024, 0, O_RDWR|O_CREAT, 0600, &taplogctx, NULL); ok1(errno == EIO); ok1(!tdb); /* With truncate, will be fine. */ tdb = tdb_open_ex("run-bad-tdb-header.tdb", 1024, 0, O_RDWR|O_CREAT|O_TRUNC, 0600, &taplogctx, NULL); ok1(tdb); tdb_close(tdb); return exit_status(); } tdb-1.4.2/test/run-check.c0000660000000000000000000000321012406075657015235 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include "logging.h" int main(int argc, char *argv[]) { struct tdb_context *tdb; TDB_DATA key, data; plan_tests(13); tdb = tdb_open_ex("run-check.tdb", 1, TDB_CLEAR_IF_FIRST, O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL); ok1(tdb); ok1(tdb_check(tdb, NULL, NULL) == 0); key.dsize = strlen("hi"); key.dptr = discard_const_p(uint8_t, "hi"); data.dsize = strlen("world"); data.dptr = discard_const_p(uint8_t, "world"); ok1(tdb_store(tdb, key, data, TDB_INSERT) == 0); ok1(tdb_check(tdb, NULL, NULL) == 0); tdb_close(tdb); tdb = tdb_open_ex("run-check.tdb", 1024, 0, O_RDWR, 0, &taplogctx, NULL); ok1(tdb); ok1(tdb_check(tdb, NULL, NULL) == 0); tdb_close(tdb); tdb = tdb_open_ex("test/tdb.corrupt", 1024, 0, O_RDWR, 0, &taplogctx, NULL); ok1(tdb); ok1(tdb_check(tdb, NULL, NULL) == -1); ok1(tdb_error(tdb) == TDB_ERR_CORRUPT); tdb_close(tdb); /* Big and little endian should work! 
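*
* (No TDB_CONVERT flag is passed below: the open code recognises a
* byte-swapped header and applies tdb_convert() transparently.)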
*/ tdb = tdb_open_ex("test/old-nohash-le.tdb", 1024, 0, O_RDWR, 0, &taplogctx, NULL); ok1(tdb); ok1(tdb_check(tdb, NULL, NULL) == 0); tdb_close(tdb); tdb = tdb_open_ex("test/old-nohash-be.tdb", 1024, 0, O_RDWR, 0, &taplogctx, NULL); ok1(tdb); ok1(tdb_check(tdb, NULL, NULL) == 0); tdb_close(tdb); return exit_status(); } tdb-1.4.2/test/run-circular-chain.c0000660000000000000000000000150013444661620017036 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include "logging.h" int main(int argc, char *argv[]) { struct tdb_context *tdb; TDB_DATA key; plan_tests(3); tdb = tdb_open_ex( "test/circular_chain.tdb", 0, TDB_DEFAULT, O_RDONLY, 0600, &taplogctx, NULL); ok1(tdb); key.dsize = strlen("x"); key.dptr = discard_const_p(uint8_t, "x"); ok1(tdb_exists(tdb, key) == 0); ok1(tdb_error(tdb) == TDB_ERR_CORRUPT); tdb_close(tdb); return exit_status(); } tdb-1.4.2/test/run-circular-freelist.c0000660000000000000000000000207713444661620017603 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include "logging.h" int main(int argc, char *argv[]) { struct tdb_context *tdb; TDB_DATA key, data; plan_tests(3); tdb = tdb_open_ex( "test/circular_freelist.tdb", 0, TDB_DEFAULT, O_RDWR, 0600, &taplogctx, NULL); ok1(tdb); /* * All freelist records are just 1 byte key and value. Insert * something that will walk the whole freelist and hit the * circle. 
*/ key.dsize = strlen("x"); key.dptr = discard_const_p(uint8_t, "x"); data.dsize = strlen("too long"); data.dptr = discard_const_p(uint8_t, "too long"); ok1(tdb_store(tdb, key, data, TDB_INSERT) == -1); ok1(tdb_error(tdb) == TDB_ERR_CORRUPT); tdb_close(tdb); return exit_status(); } tdb-1.4.2/test/run-corrupt.c0000660000000000000000000000660712406075657015673 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include "logging.h" static int check(TDB_DATA key, TDB_DATA data, void *private) { unsigned int *sizes = private; if (key.dsize > strlen("hello")) return -1; if (memcmp(key.dptr, "hello", key.dsize) != 0) return -1; if (data.dsize != strlen("world")) return -1; if (memcmp(data.dptr, "world", data.dsize) != 0) return -1; sizes[0] += key.dsize; sizes[1] += data.dsize; return 0; } static void tdb_flip_bit(struct tdb_context *tdb, unsigned int bit) { unsigned int off = bit / CHAR_BIT; unsigned char mask = (1 << (bit % CHAR_BIT)); if (tdb->map_ptr) ((unsigned char *)tdb->map_ptr)[off] ^= mask; else { unsigned char c; if (pread(tdb->fd, &c, 1, off) != 1) { fprintf(stderr, "pread: %s\n", strerror(errno)); exit(1); } c ^= mask; if (pwrite(tdb->fd, &c, 1, off) != 1) { fprintf(stderr, "pwrite: %s\n", strerror(errno)); exit(1); } } } static void check_test(struct tdb_context *tdb) { TDB_DATA key, data; unsigned int i, verifiable, corrupt, sizes[2], dsize, ksize; ok1(tdb_check(tdb, NULL, NULL) == 0); key.dptr = discard_const_p(uint8_t, "hello"); data.dsize = strlen("world"); data.dptr = discard_const_p(uint8_t, "world"); /* Key and data size respectively. */ dsize = ksize = 0; /* 5 keys in hash size 2 means we'll have multichains. */ for (key.dsize = 1; key.dsize <= 5; key.dsize++) { ksize += key.dsize; dsize += data.dsize; if (tdb_store(tdb, key, data, TDB_INSERT) != 0) abort(); } /* This is how many bytes we expect to be verifiable. */ /* From the file header. */ verifiable = strlen(TDB_MAGIC_FOOD) + 1 + 2 * sizeof(uint32_t) + 2 * sizeof(tdb_off_t) + 2 * sizeof(uint32_t); /* From the free list chain and hash chains. */ verifiable += 3 * sizeof(tdb_off_t); /* From the record headers & tailer */ verifiable += 5 * (sizeof(struct tdb_record) + sizeof(uint32_t)); /* The free block: we ignore datalen, keylen, full_hash. */ verifiable += sizeof(struct tdb_record) - 3*sizeof(uint32_t) + sizeof(uint32_t); /* Our check function verifies the key and data. */ verifiable += ksize + dsize; /* Flip one bit at a time, make sure it detects verifiable bytes. */ for (i = 0, corrupt = 0; i < tdb->map_size * CHAR_BIT; i++) { tdb_flip_bit(tdb, i); memset(sizes, 0, sizeof(sizes)); if (tdb_check(tdb, check, sizes) != 0) corrupt++; else if (sizes[0] != ksize || sizes[1] != dsize) corrupt++; tdb_flip_bit(tdb, i); } ok(corrupt == verifiable * CHAR_BIT, "corrupt %u should be %u", corrupt, verifiable * CHAR_BIT); } int main(int argc, char *argv[]) { struct tdb_context *tdb; plan_tests(4); /* This should use mmap. */ tdb = tdb_open_ex("run-corrupt.tdb", 2, TDB_CLEAR_IF_FIRST, O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL); if (!tdb) abort(); check_test(tdb); tdb_close(tdb); /* This should not. 
*/ tdb = tdb_open_ex("run-corrupt.tdb", 2, TDB_CLEAR_IF_FIRST|TDB_NOMMAP, O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL); if (!tdb) abort(); check_test(tdb); tdb_close(tdb); return exit_status(); } tdb-1.4.2/test/run-die-during-transaction.c0000660000000000000000000001203112406075657020533 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "lock-tracking.h" static ssize_t pwrite_check(int fd, const void *buf, size_t count, off_t offset); static ssize_t write_check(int fd, const void *buf, size_t count); static int ftruncate_check(int fd, off_t length); #define pwrite pwrite_check #define write write_check #define fcntl fcntl_with_lockcheck #define ftruncate ftruncate_check #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include #include #include #include "external-agent.h" #include "logging.h" #undef write #undef pwrite #undef fcntl #undef ftruncate static bool in_transaction; static int target, current; static jmp_buf jmpbuf; #define TEST_DBNAME "run-die-during-transaction.tdb" #define KEY_STRING "helloworld" static void maybe_die(int fd) { if (in_transaction && current++ == target) { longjmp(jmpbuf, 1); } } static ssize_t pwrite_check(int fd, const void *buf, size_t count, off_t offset) { ssize_t ret; maybe_die(fd); ret = pwrite(fd, buf, count, offset); if (ret != count) return ret; maybe_die(fd); return ret; } static ssize_t write_check(int fd, const void *buf, size_t count) { ssize_t ret; maybe_die(fd); ret = write(fd, buf, count); if (ret != count) return ret; maybe_die(fd); return ret; } static int ftruncate_check(int fd, off_t length) { int ret; maybe_die(fd); ret = ftruncate(fd, length); maybe_die(fd); return ret; } static bool test_death(enum operation op, struct agent *agent) { struct tdb_context *tdb = NULL; TDB_DATA key; enum agent_return ret; int needed_recovery = 0; current = target = 0; reset: unlink(TEST_DBNAME); tdb = tdb_open_ex(TEST_DBNAME, 1024, TDB_NOMMAP, O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL); if (setjmp(jmpbuf) != 0) { /* We're partway through. Simulate our death. */ close(tdb->fd); forget_locking(); in_transaction = false; ret = external_agent_operation(agent, NEEDS_RECOVERY, ""); if (ret == SUCCESS) needed_recovery++; else if (ret != FAILED) { diag("Step %u agent NEEDS_RECOVERY = %s", current, agent_return_name(ret)); return false; } ret = external_agent_operation(agent, op, KEY_STRING); if (ret != SUCCESS) { diag("Step %u op %s failed = %s", current, operation_name(op), agent_return_name(ret)); return false; } ret = external_agent_operation(agent, NEEDS_RECOVERY, ""); if (ret != FAILED) { diag("Still needs recovery after step %u = %s", current, agent_return_name(ret)); return false; } ret = external_agent_operation(agent, CHECK, ""); if (ret != SUCCESS) { diag("Step %u check failed = %s", current, agent_return_name(ret)); return false; } ret = external_agent_operation(agent, CLOSE, ""); if (ret != SUCCESS) { diag("Step %u close failed = %s", current, agent_return_name(ret)); return false; } /* Suppress logging as this tries to use closed fd. 
*/ suppress_logging = true; suppress_lockcheck = true; tdb_close(tdb); suppress_logging = false; suppress_lockcheck = false; target++; current = 0; goto reset; } /* Put key for agent to fetch. */ key.dsize = strlen(KEY_STRING); key.dptr = discard_const_p(uint8_t, KEY_STRING); if (tdb_store(tdb, key, key, TDB_INSERT) != 0) return false; /* This is the key we insert in transaction. */ key.dsize--; ret = external_agent_operation(agent, OPEN, TEST_DBNAME); if (ret != SUCCESS) { fprintf(stderr, "Agent failed to open: %s\n", agent_return_name(ret)); exit(1); } ret = external_agent_operation(agent, FETCH, KEY_STRING); if (ret != SUCCESS) { fprintf(stderr, "Agent failed find key: %s\n", agent_return_name(ret)); exit(1); } in_transaction = true; if (tdb_transaction_start(tdb) != 0) return false; if (tdb_store(tdb, key, key, TDB_INSERT) != 0) return false; if (tdb_transaction_commit(tdb) != 0) return false; in_transaction = false; /* We made it! */ diag("Completed %u runs", current); tdb_close(tdb); ret = external_agent_operation(agent, CLOSE, ""); if (ret != SUCCESS) { diag("Step %u close failed = %s", current, agent_return_name(ret)); return false; } #ifdef HAVE_INCOHERENT_MMAP /* This means we always mmap, which makes this test a noop. */ ok1(1); #else ok1(needed_recovery); #endif ok1(locking_errors == 0); ok1(forget_locking() == 0); locking_errors = 0; return true; } int main(int argc, char *argv[]) { enum operation ops[] = { FETCH, STORE, TRANSACTION_START }; struct agent *agent; int i; plan_tests(12); unlock_callback = maybe_die; agent = prepare_external_agent(); for (i = 0; i < sizeof(ops)/sizeof(ops[0]); i++) { diag("Testing %s after death", operation_name(ops[i])); ok1(test_death(ops[i], agent)); } return exit_status(); } tdb-1.4.2/test/run-endian.c0000660000000000000000000000325412406075657015426 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include "logging.h" int main(int argc, char *argv[]) { struct tdb_context *tdb; TDB_DATA key, data; plan_tests(13); tdb = tdb_open_ex("run-endian.tdb", 1024, TDB_CLEAR_IF_FIRST|TDB_CONVERT, O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL); ok1(tdb); key.dsize = strlen("hi"); key.dptr = discard_const_p(uint8_t, "hi"); data.dsize = strlen("world"); data.dptr = discard_const_p(uint8_t, "world"); ok1(tdb_store(tdb, key, data, TDB_MODIFY) < 0); ok1(tdb_error(tdb) == TDB_ERR_NOEXIST); ok1(tdb_store(tdb, key, data, TDB_INSERT) == 0); ok1(tdb_store(tdb, key, data, TDB_INSERT) < 0); ok1(tdb_error(tdb) == TDB_ERR_EXISTS); ok1(tdb_store(tdb, key, data, TDB_MODIFY) == 0); data = tdb_fetch(tdb, key); ok1(data.dsize == strlen("world")); ok1(memcmp(data.dptr, "world", strlen("world")) == 0); free(data.dptr); key.dsize++; data = tdb_fetch(tdb, key); ok1(data.dptr == NULL); tdb_close(tdb); /* Reopen: should read it */ tdb = tdb_open_ex("run-endian.tdb", 1024, 0, O_RDWR, 0, &taplogctx, NULL); ok1(tdb); key.dsize = strlen("hi"); key.dptr = discard_const_p(uint8_t, "hi"); data = tdb_fetch(tdb, key); ok1(data.dsize == strlen("world")); ok1(memcmp(data.dptr, "world", strlen("world")) == 0); free(data.dptr); tdb_close(tdb); return exit_status(); } 
tdb-1.4.2/test/run-fcntl-deadlock.c0000660000000000000000000001174213120574744017036 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "replace.h" #include "system/filesys.h" #include "system/time.h" #include #include "tap-interface.h" /* * This tests the low level locking requirement * for the allrecord lock/prepare_commit and traverse_read interaction. * * The pattern with the traverse_read and prepare_commit interaction is * the following: * * 1. transaction_start got the allrecord lock with F_RDLCK. * * 2. the traverse_read code walks the database in a sequence like this * (per chain): * 2.1 chainlock(chainX, F_RDLCK) * 2.2 recordlock(chainX.record1, F_RDLCK) * 2.3 chainunlock(chainX, F_RDLCK) * 2.4 callback(chainX.record1) * 2.5 chainlock(chainX, F_RDLCK) * 2.6 recordunlock(chainX.record1, F_RDLCK) * 2.7 recordlock(chainX.record2, F_RDLCK) * 2.8 chainunlock(chainX, F_RDLCK) * 2.9 callback(chainX.record2) * 2.10 chainlock(chainX, F_RDLCK) * 2.11 recordunlock(chainX.record2, F_RDLCK) * 2.12 chainunlock(chainX, F_RDLCK) * 2.13 goto next chain * * So it has always one record locked in F_RDLCK mode and tries to * get the 2nd one before it releases the first one. * * 3. prepare_commit tries to upgrade the allrecord lock to F_RWLCK * If that happens at the time of 2.4, the operation of * 2.5 may deadlock with the allrecord lock upgrade. * On Linux step 2.5 works in order to make some progress with the * locking, but on solaris it might fail because the kernel * wants to satisfy the 1st lock requester before the 2nd one. * * I think the first step is a standalone test that does this: * * process1: F_RDLCK for ofs=0 len=2 * process2: F_RDLCK for ofs=0 len=1 * process1: upgrade ofs=0 len=2 to F_RWLCK (in blocking mode) * process2: F_RDLCK for ofs=1 len=1 * process2: unlock ofs=0 len=2 * process1: should continue at that point * * Such a test follows here... */ static int raw_fcntl_lock(int fd, int rw, off_t off, off_t len, bool waitflag) { struct flock fl; int cmd; fl.l_type = rw; fl.l_whence = SEEK_SET; fl.l_start = off; fl.l_len = len; fl.l_pid = 0; cmd = waitflag ? 
F_SETLKW : F_SETLK; return fcntl(fd, cmd, &fl); } static int raw_fcntl_unlock(int fd, off_t off, off_t len) { struct flock fl; fl.l_type = F_UNLCK; fl.l_whence = SEEK_SET; fl.l_start = off; fl.l_len = len; fl.l_pid = 0; return fcntl(fd, F_SETLKW, &fl); } int pipe_r; int pipe_w; char buf[2]; static void expect_char(char c) { read(pipe_r, buf, 1); if (*buf != c) { fail("We were expecting %c, but got %c", c, buf[0]); } } static void send_char(char c) { write(pipe_w, &c, 1); } int main(int argc, char *argv[]) { int process; int fd; const char *filename = "run-fcntl-deadlock.lck"; int pid; int pipes_1_2[2]; int pipes_2_1[2]; int ret; pipe(pipes_1_2); pipe(pipes_2_1); fd = open(filename, O_RDWR | O_CREAT, 0755); pid = fork(); if (pid == 0) { pipe_r = pipes_1_2[0]; pipe_w = pipes_2_1[1]; process = 2; alarm(15); } else { pipe_r = pipes_2_1[0]; pipe_w = pipes_1_2[1]; process = 1; alarm(15); } /* a: process1: F_RDLCK for ofs=0 len=2 */ if (process == 1) { ret = raw_fcntl_lock(fd, F_RDLCK, 0, 2, true); ok(ret == 0, "process 1 lock ofs=0 len=2: %d - %s", ret, strerror(errno)); diag("process 1 took read lock on range 0,2"); send_char('a'); } /* process2: F_RDLCK for ofs=0 len=1 */ if (process == 2) { expect_char('a'); ret = raw_fcntl_lock(fd, F_RDLCK, 0, 1, true); ok(ret == 0, "process 2 lock ofs=0 len=1: %d - %s", ret, strerror(errno));; diag("process 2 took read lock on range 0,1"); send_char('b'); } /* process1: upgrade ofs=0 len=2 to F_RWLCK (in blocking mode) */ if (process == 1) { expect_char('b'); send_char('c'); diag("process 1 starts upgrade on range 0,2"); ret = raw_fcntl_lock(fd, F_WRLCK, 0, 2, true); ok(ret == 0, "process 1 RW lock ofs=0 len=2: %d - %s", ret, strerror(errno)); diag("process 1 got read upgrade done"); /* at this point process 1 is blocked on 2 releasing the read lock */ } /* * process2: F_RDLCK for ofs=1 len=1 * process2: unlock ofs=0 len=2 */ if (process == 2) { expect_char('c'); /* we know process 1 is *about* to lock */ sleep(1); ret = raw_fcntl_lock(fd, F_RDLCK, 1, 1, true); ok(ret == 0, "process 2 lock ofs=1 len=1: %d - %s", ret, strerror(errno)); diag("process 2 got read lock on 1,1\n"); ret = raw_fcntl_unlock(fd, 0, 2); ok(ret == 0, "process 2 unlock ofs=0 len=2: %d - %s", ret, strerror(errno)); diag("process 2 released read lock on 0,2\n"); sleep(1); send_char('d'); } if (process == 1) { expect_char('d'); } diag("process %d has got to the end\n", process); return 0; } tdb-1.4.2/test/run-incompatible.c0000660000000000000000000001131012520121120016574 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include static unsigned int tdb_dumb_hash(TDB_DATA *key) { return key->dsize; } static void log_fn(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...) 
{ unsigned int *count = tdb_get_logging_private(tdb); if (strstr(fmt, "hash")) (*count)++; } static unsigned int hdr_rwlocks(const char *fname) { struct tdb_header hdr; ssize_t nread; int fd = open(fname, O_RDONLY); if (fd == -1) return -1; nread = read(fd, &hdr, sizeof(hdr)); close(fd); if (nread != sizeof(hdr)) { return -1; } return hdr.rwlocks; } int main(int argc, char *argv[]) { struct tdb_context *tdb; unsigned int log_count, flags; TDB_DATA d, r; struct tdb_logging_context log_ctx = { log_fn, &log_count }; plan_tests(38 * 2); for (flags = 0; flags <= TDB_CONVERT; flags += TDB_CONVERT) { unsigned int rwmagic = TDB_HASH_RWLOCK_MAGIC; if (flags & TDB_CONVERT) tdb_convert(&rwmagic, sizeof(rwmagic)); /* Create an old-style hash. */ log_count = 0; tdb = tdb_open_ex("run-incompatible.tdb", 0, flags, O_CREAT|O_RDWR|O_TRUNC, 0600, &log_ctx, NULL); ok1(tdb); ok1(log_count == 0); d.dptr = discard_const_p(uint8_t, "Hello"); d.dsize = 5; ok1(tdb_store(tdb, d, d, TDB_INSERT) == 0); tdb_close(tdb); /* Should not have marked rwlocks field. */ ok1(hdr_rwlocks("run-incompatible.tdb") == 0); /* We can still open any old-style with incompat flag. */ log_count = 0; tdb = tdb_open_ex("run-incompatible.tdb", 0, TDB_INCOMPATIBLE_HASH, O_RDWR, 0600, &log_ctx, NULL); ok1(tdb); ok1(log_count == 0); r = tdb_fetch(tdb, d); ok1(r.dsize == 5); free(r.dptr); ok1(tdb_check(tdb, NULL, NULL) == 0); tdb_close(tdb); log_count = 0; tdb = tdb_open_ex("test/jenkins-le-hash.tdb", 0, 0, O_RDONLY, 0, &log_ctx, tdb_jenkins_hash); ok1(tdb); ok1(log_count == 0); ok1(tdb_check(tdb, NULL, NULL) == 0); tdb_close(tdb); log_count = 0; tdb = tdb_open_ex("test/jenkins-be-hash.tdb", 0, 0, O_RDONLY, 0, &log_ctx, tdb_jenkins_hash); ok1(tdb); ok1(log_count == 0); ok1(tdb_check(tdb, NULL, NULL) == 0); tdb_close(tdb); /* OK, now create with incompatible flag, default hash. */ log_count = 0; tdb = tdb_open_ex("run-incompatible.tdb", 0, flags|TDB_INCOMPATIBLE_HASH, O_CREAT|O_RDWR|O_TRUNC, 0600, &log_ctx, NULL); ok1(tdb); ok1(log_count == 0); d.dptr = discard_const_p(uint8_t, "Hello"); d.dsize = 5; ok1(tdb_store(tdb, d, d, TDB_INSERT) == 0); tdb_close(tdb); /* Should have marked rwlocks field. */ ok1(hdr_rwlocks("run-incompatible.tdb") == rwmagic); /* Cannot open with old hash. */ log_count = 0; tdb = tdb_open_ex("run-incompatible.tdb", 0, 0, O_RDWR, 0600, &log_ctx, tdb_old_hash); ok1(!tdb); ok1(log_count == 1); /* Can open with jenkins hash. */ log_count = 0; tdb = tdb_open_ex("run-incompatible.tdb", 0, 0, O_RDWR, 0600, &log_ctx, tdb_jenkins_hash); ok1(tdb); ok1(log_count == 0); r = tdb_fetch(tdb, d); ok1(r.dsize == 5); free(r.dptr); ok1(tdb_check(tdb, NULL, NULL) == 0); tdb_close(tdb); /* Can open by letting it figure it out itself. */ log_count = 0; tdb = tdb_open_ex("run-incompatible.tdb", 0, 0, O_RDWR, 0600, &log_ctx, NULL); ok1(tdb); ok1(log_count == 0); r = tdb_fetch(tdb, d); ok1(r.dsize == 5); free(r.dptr); ok1(tdb_check(tdb, NULL, NULL) == 0); tdb_close(tdb); /* We can also use incompatible hash with other hashes. */ log_count = 0; tdb = tdb_open_ex("run-incompatible.tdb", 0, flags|TDB_INCOMPATIBLE_HASH, O_CREAT|O_RDWR|O_TRUNC, 0600, &log_ctx, tdb_dumb_hash); ok1(tdb); ok1(log_count == 0); d.dptr = discard_const_p(uint8_t, "Hello"); d.dsize = 5; ok1(tdb_store(tdb, d, d, TDB_INSERT) == 0); tdb_close(tdb); /* Should have marked rwlocks field. */ ok1(hdr_rwlocks("run-incompatible.tdb") == rwmagic); /* It should not open if we don't specify. 
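*
* (tdb_open_ex only auto-detects the default and Jenkins hashes; a
* TDB written with a custom function such as tdb_dumb_hash cannot be
* guessed and must be reopened with that same function.)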
*/ log_count = 0; tdb = tdb_open_ex("run-incompatible.tdb", 0, 0, O_RDWR, 0, &log_ctx, NULL); ok1(!tdb); ok1(log_count == 1); /* Should reopen with correct hash. */ log_count = 0; tdb = tdb_open_ex("run-incompatible.tdb", 0, 0, O_RDWR, 0, &log_ctx, tdb_dumb_hash); ok1(tdb); ok1(log_count == 0); r = tdb_fetch(tdb, d); ok1(r.dsize == 5); free(r.dptr); ok1(tdb_check(tdb, NULL, NULL) == 0); tdb_close(tdb); } return exit_status(); } tdb-1.4.2/test/run-marklock-deadlock.c0000660000000000000000000001634313444661620017534 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include #include #include #include "logging.h" static TDB_DATA key, data; static void do_chainlock(const char *name, int tdb_flags, int up, int down) { struct tdb_context *tdb; int ret; ssize_t nread, nwritten; char c = 0; tdb = tdb_open_ex(name, 3, tdb_flags, O_RDWR|O_CREAT, 0755, &taplogctx, NULL); ok(tdb, "tdb_open_ex should succeed"); ret = tdb_chainlock(tdb, key); ok(ret == 0, "tdb_chainlock should succeed"); nwritten = write(up, &c, sizeof(c)); ok(nwritten == sizeof(c), "write should succeed"); nread = read(down, &c, sizeof(c)); ok(nread == sizeof(c), "read should succeed"); exit(0); } static void do_allrecord_lock(const char *name, int tdb_flags, int up, int down) { struct tdb_context *tdb; int ret; ssize_t nread, nwritten; char c = 0; tdb = tdb_open_ex(name, 3, tdb_flags, O_RDWR|O_CREAT, 0755, &taplogctx, NULL); ok(tdb, "tdb_open_ex should succeed"); ret = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false); ok(ret == 0, "tdb_allrecord_lock should succeed"); nwritten = write(up, &c, sizeof(c)); ok(nwritten == sizeof(c), "write should succeed"); nread = read(down, &c, sizeof(c)); ok(nread == sizeof(c), "read should succeed"); exit(0); } /* The code should barf on TDBs created with rwlocks. */ static int do_tests(const char *name, int tdb_flags) { struct tdb_context *tdb; int ret; pid_t chainlock_child, allrecord_child; int chainlock_down[2]; int chainlock_up[2]; int allrecord_down[2]; int allrecord_up[2]; char c; ssize_t nread, nwritten; key.dsize = strlen("hi"); key.dptr = discard_const_p(uint8_t, "hi"); data.dsize = strlen("world"); data.dptr = discard_const_p(uint8_t, "world"); ret = pipe(chainlock_down); ok(ret == 0, "pipe should succeed"); ret = pipe(chainlock_up); ok(ret == 0, "pipe should succeed"); ret = pipe(allrecord_down); ok(ret == 0, "pipe should succeed"); ret = pipe(allrecord_up); ok(ret == 0, "pipe should succeed"); chainlock_child = fork(); ok(chainlock_child != -1, "fork should succeed"); if (chainlock_child == 0) { close(chainlock_up[0]); close(chainlock_down[1]); close(allrecord_up[0]); close(allrecord_up[1]); close(allrecord_down[0]); close(allrecord_down[1]); do_chainlock(name, tdb_flags, chainlock_up[1], chainlock_down[0]); exit(0); } close(chainlock_up[1]); close(chainlock_down[0]); nread = read(chainlock_up[0], &c, sizeof(c)); ok(nread == sizeof(c), "read should succeed"); /* * Now we have a process holding a chainlock. Start another process * trying the allrecord lock. This will block. 
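*
* (The allrecord lock spans every hash chain, so it has to queue
* behind the chainlock the first child is still holding.)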
*/ allrecord_child = fork(); ok(allrecord_child != -1, "fork should succeed"); if (allrecord_child == 0) { close(chainlock_up[0]); close(chainlock_up[1]); close(chainlock_down[0]); close(chainlock_down[1]); close(allrecord_up[0]); close(allrecord_down[1]); do_allrecord_lock(name, tdb_flags, allrecord_up[1], allrecord_down[0]); exit(0); } close(allrecord_up[1]); close(allrecord_down[0]); poll(NULL, 0, 500); tdb = tdb_open_ex(name, 3, tdb_flags, O_RDWR|O_CREAT, 0755, &taplogctx, NULL); ok(tdb, "tdb_open_ex should succeed"); /* * Someone already holds a chainlock, but we're able to get the * freelist lock. * * The freelist lock/mutex is independent from the allrecord lock/mutex. */ ret = tdb_chainlock_nonblock(tdb, key); ok(ret == -1, "tdb_chainlock_nonblock should not succeed"); ret = tdb_lock_nonblock(tdb, -1, F_WRLCK); ok(ret == 0, "tdb_lock_nonblock should succeed"); ret = tdb_unlock(tdb, -1, F_WRLCK); ok(ret == 0, "tdb_unlock should succeed"); /* * We have someone else having done the lock for us. Just mark it. */ ret = tdb_chainlock_mark(tdb, key); ok(ret == 0, "tdb_chainlock_mark should succeed"); /* * The tdb_store below will block the freelist. In one version of the * mutex patches, the freelist was already blocked here by the * allrecord child, which was waiting for the chainlock child to give * up its chainlock. Make sure that we don't run into this * deadlock. To exercise the deadlock, just comment out the "ok" * line. * * The freelist lock/mutex is independent from the allrecord lock/mutex. */ ret = tdb_lock_nonblock(tdb, -1, F_WRLCK); ok(ret == 0, "tdb_lock_nonblock should succeed"); ret = tdb_unlock(tdb, -1, F_WRLCK); ok(ret == 0, "tdb_unlock should succeed"); ret = tdb_store(tdb, key, data, TDB_INSERT); ok(ret == 0, "tdb_store should succeed"); ret = tdb_chainlock_unmark(tdb, key); ok(ret == 0, "tdb_chainlock_unmark should succeed"); nwritten = write(chainlock_down[1], &c, sizeof(c)); ok(nwritten == sizeof(c), "write should succeed"); nread = read(chainlock_up[0], &c, sizeof(c)); ok(nread == 0, "read should succeed"); nread = read(allrecord_up[0], &c, sizeof(c)); ok(nread == sizeof(c), "read should succeed"); /* * Someone already holds the allrecord lock, but we're able to get the * freelist lock. * * The freelist lock/mutex is independent from the allrecord lock/mutex. */ ret = tdb_chainlock_nonblock(tdb, key); ok(ret == -1, "tdb_chainlock_nonblock should not succeed"); ret = tdb_lockall_nonblock(tdb); ok(ret == -1, "tdb_lockall_nonblock should not succeed"); ret = tdb_lock_nonblock(tdb, -1, F_WRLCK); ok(ret == 0, "tdb_lock_nonblock should succeed"); ret = tdb_unlock(tdb, -1, F_WRLCK); ok(ret == 0, "tdb_unlock should succeed"); /* * We have someone else having done the lock for us. Just mark it. 
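*
* A minimal sketch of the mark/unmark pattern used below (the lock
* is genuinely held by the child; we only note that fact locally):
*
*   tdb_lockall_mark(tdb);    // record it as held, no fcntl issued
*   ...operate on the tdb...
*   tdb_lockall_unmark(tdb);  // forget it; the child's lock remains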
*/ ret = tdb_lockall_mark(tdb); ok(ret == 0, "tdb_lockall_mark should succeed"); ret = tdb_lock_nonblock(tdb, -1, F_WRLCK); ok(ret == 0, "tdb_lock_nonblock should succeed"); ret = tdb_unlock(tdb, -1, F_WRLCK); ok(ret == 0, "tdb_unlock should succeed"); ret = tdb_store(tdb, key, data, TDB_REPLACE); ok(ret == 0, "tdb_store should succeed"); ret = tdb_lockall_unmark(tdb); ok(ret == 0, "tdb_lockall_unmark should succeed"); nwritten = write(allrecord_down[1], &c, sizeof(c)); ok(nwritten == sizeof(c), "write should succeed"); nread = read(allrecord_up[0], &c, sizeof(c)); ok(nread == 0, "read should succeed"); close(chainlock_up[0]); close(chainlock_down[1]); close(allrecord_up[0]); close(allrecord_down[1]); diag("%s tests done", name); return exit_status(); } int main(int argc, char *argv[]) { int ret; bool mutex_support; mutex_support = tdb_runtime_check_for_robust_mutexes(); ret = do_tests("marklock-deadlock-fcntl.tdb", TDB_CLEAR_IF_FIRST | TDB_INCOMPATIBLE_HASH); ok(ret == 0, "marklock-deadlock-fcntl.tdb tests should succeed"); if (!mutex_support) { skip(1, "No robust mutex support, " "skipping marklock-deadlock-mutex.tdb tests"); return exit_status(); } ret = do_tests("marklock-deadlock-mutex.tdb", TDB_CLEAR_IF_FIRST | TDB_MUTEX_LOCKING | TDB_INCOMPATIBLE_HASH); ok(ret == 0, "marklock-deadlock-mutex.tdb tests should succeed"); return exit_status(); } tdb-1.4.2/test/run-mutex-allrecord-bench.c0000660000000000000000000000400612406075657020350 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include #include #include static TDB_DATA key, data; static void log_fn(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...) { va_list ap; va_start(ap, fmt); vfprintf(stderr, fmt, ap); va_end(ap); } static double timeval_elapsed2(const struct timeval *tv1, const struct timeval *tv2) { return (tv2->tv_sec - tv1->tv_sec) + (tv2->tv_usec - tv1->tv_usec)*1.0e-6; } static double timeval_elapsed(const struct timeval *tv) { struct timeval tv2; gettimeofday(&tv2, NULL); return timeval_elapsed2(tv, &tv2); } /* The code should barf on TDBs created with rwlocks. 
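*
* ("rwlocks" refers to the header's rwlocks field as used by the old
* spinlock builds; opening such a file is expected to fail cleanly.)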
*/ int main(int argc, char *argv[]) { struct tdb_context *tdb; unsigned int log_count; struct tdb_logging_context log_ctx = { log_fn, &log_count }; int ret; struct timeval start; double elapsed; bool runtime_support; runtime_support = tdb_runtime_check_for_robust_mutexes(); if (!runtime_support) { skip(1, "No robust mutex support"); return exit_status(); } key.dsize = strlen("hi"); key.dptr = discard_const_p(uint8_t, "hi"); data.dsize = strlen("world"); data.dptr = discard_const_p(uint8_t, "world"); tdb = tdb_open_ex("mutex-allrecord-bench.tdb", 1000000, TDB_INCOMPATIBLE_HASH| TDB_MUTEX_LOCKING| TDB_CLEAR_IF_FIRST, O_RDWR|O_CREAT, 0755, &log_ctx, NULL); ok(tdb, "tdb_open_ex should succeed"); gettimeofday(&start, NULL); ret = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false); elapsed = timeval_elapsed(&start); ok(ret == 0, "tdb_allrecord_lock should succeed"); diag("allrecord_lock took %f seconds", elapsed); return exit_status(); } tdb-1.4.2/test/run-mutex-allrecord-block.c0000660000000000000000000000546112406075657020371 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include #include #include static TDB_DATA key, data; static void log_fn(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...) { va_list ap; va_start(ap, fmt); vfprintf(stderr, fmt, ap); va_end(ap); } static int do_child(int tdb_flags, int to, int from) { struct tdb_context *tdb; unsigned int log_count; struct tdb_logging_context log_ctx = { log_fn, &log_count }; int ret; char c = 0; tdb = tdb_open_ex("mutex-allrecord-block.tdb", 3, tdb_flags, O_RDWR|O_CREAT, 0755, &log_ctx, NULL); ok(tdb, "tdb_open_ex should succeed"); ret = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false); ok(ret == 0, "tdb_allrecord_lock should succeed"); write(to, &c, sizeof(c)); read(from, &c, sizeof(c)); ret = tdb_allrecord_unlock(tdb, F_WRLCK, false); ok(ret == 0, "tdb_allrecord_unlock should succeed"); return 0; } /* The code should barf on TDBs created with rwlocks. 
*/ int main(int argc, char *argv[]) { struct tdb_context *tdb; unsigned int log_count; struct tdb_logging_context log_ctx = { log_fn, &log_count }; int ret, status; pid_t child, wait_ret; int fromchild[2]; int tochild[2]; char c; int tdb_flags; bool runtime_support; runtime_support = tdb_runtime_check_for_robust_mutexes(); if (!runtime_support) { skip(1, "No robust mutex support"); return exit_status(); } key.dsize = strlen("hi"); key.dptr = discard_const_p(uint8_t, "hi"); data.dsize = strlen("world"); data.dptr = discard_const_p(uint8_t, "world"); pipe(fromchild); pipe(tochild); tdb_flags = TDB_INCOMPATIBLE_HASH| TDB_MUTEX_LOCKING| TDB_CLEAR_IF_FIRST; child = fork(); if (child == 0) { close(fromchild[0]); close(tochild[1]); return do_child(tdb_flags, fromchild[1], tochild[0]); } close(fromchild[1]); close(tochild[0]); read(fromchild[0], &c, sizeof(c)); tdb = tdb_open_ex("mutex-allrecord-block.tdb", 0, tdb_flags, O_RDWR|O_CREAT, 0755, &log_ctx, NULL); ok(tdb, "tdb_open_ex should succeed"); ret = tdb_chainlock_nonblock(tdb, key); ok(ret == -1, "tdb_chainlock_nonblock should not succeed"); write(tochild[1], &c, sizeof(c)); ret = tdb_chainlock(tdb, key); ok(ret == 0, "tdb_chainlock should not succeed"); ret = tdb_chainunlock(tdb, key); ok(ret == 0, "tdb_chainunlock should succeed"); wait_ret = wait(&status); ok(wait_ret == child, "child should have exited correctly"); diag("done"); return exit_status(); } tdb-1.4.2/test/run-mutex-allrecord-trylock.c0000660000000000000000000000515412406075657020765 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include #include #include static TDB_DATA key, data; static void log_fn(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...) { va_list ap; va_start(ap, fmt); vfprintf(stderr, fmt, ap); va_end(ap); } static int do_child(int tdb_flags, int to, int from) { struct tdb_context *tdb; unsigned int log_count; struct tdb_logging_context log_ctx = { log_fn, &log_count }; int ret; char c = 0; tdb = tdb_open_ex("mutex-allrecord-trylock.tdb", 3, tdb_flags, O_RDWR|O_CREAT, 0755, &log_ctx, NULL); ok(tdb, "tdb_open_ex should succeed"); ret = tdb_chainlock(tdb, key); ok(ret == 0, "tdb_chainlock should succeed"); write(to, &c, sizeof(c)); read(from, &c, sizeof(c)); ret = tdb_chainunlock(tdb, key); ok(ret == 0, "tdb_chainunlock should succeed"); return 0; } /* The code should barf on TDBs created with rwlocks. 
*/ int main(int argc, char *argv[]) { struct tdb_context *tdb; unsigned int log_count; struct tdb_logging_context log_ctx = { log_fn, &log_count }; int ret, status; pid_t child, wait_ret; int fromchild[2]; int tochild[2]; char c; int tdb_flags; bool runtime_support; runtime_support = tdb_runtime_check_for_robust_mutexes(); if (!runtime_support) { skip(1, "No robust mutex support"); return exit_status(); } key.dsize = strlen("hi"); key.dptr = discard_const_p(uint8_t, "hi"); data.dsize = strlen("world"); data.dptr = discard_const_p(uint8_t, "world"); pipe(fromchild); pipe(tochild); tdb_flags = TDB_INCOMPATIBLE_HASH| TDB_MUTEX_LOCKING| TDB_CLEAR_IF_FIRST; child = fork(); if (child == 0) { close(fromchild[0]); close(tochild[1]); return do_child(tdb_flags, fromchild[1], tochild[0]); } close(fromchild[1]); close(tochild[0]); read(fromchild[0], &c, sizeof(c)); tdb = tdb_open_ex("mutex-allrecord-trylock.tdb", 0, tdb_flags, O_RDWR|O_CREAT, 0755, &log_ctx, NULL); ok(tdb, "tdb_open_ex should succeed"); ret = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_NOWAIT, false); ok(ret == -1, "tdb_allrecord_lock (nowait) should not succeed"); write(tochild[1], &c, sizeof(c)); wait_ret = wait(&status); ok(wait_ret == child, "child should have exited correctly"); diag("done"); return exit_status(); } tdb-1.4.2/test/run-mutex-die.c0000660000000000000000000001160612406075657016071 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "lock-tracking.h" static ssize_t pwrite_check(int fd, const void *buf, size_t count, off_t offset); static ssize_t write_check(int fd, const void *buf, size_t count); static int ftruncate_check(int fd, off_t length); #define pwrite pwrite_check #define write write_check #define fcntl fcntl_with_lockcheck #define ftruncate ftruncate_check #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include #include #include "external-agent.h" #include "logging.h" #undef write #undef pwrite #undef fcntl #undef ftruncate static int target, current; #define TEST_DBNAME "run-mutex-die.tdb" #define KEY_STRING "helloworld" static void maybe_die(int fd) { if (target == 0) { return; } current += 1; if (current == target) { _exit(1); } } static ssize_t pwrite_check(int fd, const void *buf, size_t count, off_t offset) { ssize_t ret; maybe_die(fd); ret = pwrite(fd, buf, count, offset); if (ret != count) return ret; maybe_die(fd); return ret; } static ssize_t write_check(int fd, const void *buf, size_t count) { ssize_t ret; maybe_die(fd); ret = write(fd, buf, count); if (ret != count) return ret; maybe_die(fd); return ret; } static int ftruncate_check(int fd, off_t length) { int ret; maybe_die(fd); ret = ftruncate(fd, length); maybe_die(fd); return ret; } static enum agent_return flakey_ops(struct agent *a) { enum agent_return ret; /* * Run in the external agent child */ ret = external_agent_operation(a, OPEN_WITH_CLEAR_IF_FIRST, TEST_DBNAME); if (ret != SUCCESS) { fprintf(stderr, "Agent failed to open: %s\n", agent_return_name(ret)); return ret; } ret = external_agent_operation(a, UNMAP, ""); if (ret != SUCCESS) { fprintf(stderr, "Agent failed to unmap: %s\n", agent_return_name(ret)); return ret; } ret = external_agent_operation(a, STORE, "xyz"); if (ret != SUCCESS) { fprintf(stderr, 
"Agent failed to store: %s\n", agent_return_name(ret)); return ret; } ret = external_agent_operation(a, STORE, KEY_STRING); if (ret != SUCCESS) { fprintf(stderr, "Agent failed store: %s\n", agent_return_name(ret)); return ret; } ret = external_agent_operation(a, FETCH, KEY_STRING); if (ret != SUCCESS) { fprintf(stderr, "Agent failed find key: %s\n", agent_return_name(ret)); return ret; } ret = external_agent_operation(a, PING, ""); if (ret != SUCCESS) { fprintf(stderr, "Agent failed ping: %s\n", agent_return_name(ret)); return ret; } return ret; } static bool prep_db(void) { struct tdb_context *tdb; TDB_DATA key; TDB_DATA data; key.dptr = discard_const_p(uint8_t, KEY_STRING); key.dsize = strlen((char *)key.dptr); data.dptr = discard_const_p(uint8_t, "foo"); data.dsize = strlen((char *)data.dptr); unlink(TEST_DBNAME); tdb = tdb_open_ex( TEST_DBNAME, 2, TDB_INCOMPATIBLE_HASH|TDB_MUTEX_LOCKING|TDB_CLEAR_IF_FIRST, O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL); if (tdb == NULL) { return false; } if (tdb_store(tdb, key, data, TDB_INSERT) != 0) { return false; } tdb_close(tdb); tdb = NULL; forget_locking(); return true; } static bool test_db(void) { struct tdb_context *tdb; int ret; tdb = tdb_open_ex( TEST_DBNAME, 1024, TDB_INCOMPATIBLE_HASH, O_RDWR, 0600, &taplogctx, NULL); if (tdb == NULL) { perror("tdb_open_ex failed"); return false; } ret = tdb_traverse(tdb, NULL, NULL); if (ret == -1) { perror("traverse failed"); goto fail; } tdb_close(tdb); forget_locking(); return true; fail: tdb_close(tdb); return false; } static bool test_one(void) { enum agent_return ret; ret = AGENT_DIED; target = 19; while (ret != SUCCESS) { struct agent *agent; { int child_target = target; bool pret; target = 0; pret = prep_db(); ok1(pret); target = child_target; } agent = prepare_external_agent(); ret = flakey_ops(agent); diag("Agent (target=%d) returns %s", target, agent_return_name(ret)); if (ret == SUCCESS) { ok((target > 19), "At least one AGENT_DIED expected"); } else { ok(ret == AGENT_DIED, "AGENT_DIED expected"); } shutdown_agent(agent); { int child_target = target; bool tret; target = 0; tret = test_db(); ok1(tret); target = child_target; } target += 1; } return true; } int main(int argc, char *argv[]) { bool ret; bool runtime_support; runtime_support = tdb_runtime_check_for_robust_mutexes(); if (!runtime_support) { skip(1, "No robust mutex support"); return exit_status(); } plan_tests(12); unlock_callback = maybe_die; ret = test_one(); ok1(ret); diag("done"); return exit_status(); } tdb-1.4.2/test/run-mutex-openflags2.c0000660000000000000000000000766413444661620017373 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include #include #include #include static TDB_DATA key, data; static void log_void(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...) { } static void log_fn(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...) 
{ va_list ap; va_start(ap, fmt); vfprintf(stderr, fmt, ap); va_end(ap); } static int do_child(int fd) { struct tdb_context *tdb; unsigned int log_count; struct tdb_logging_context log_ctx = { log_fn, &log_count }; struct tdb_logging_context nolog_ctx = { log_void, NULL }; char c; read(fd, &c, 1); tdb = tdb_open_ex("mutex-openflags2.tdb", 0, TDB_DEFAULT, O_RDWR|O_CREAT, 0755, &nolog_ctx, NULL); ok((tdb == NULL) && (errno == EINVAL), "TDB_DEFAULT without " "TDB_MUTEX_LOCKING should fail with EINVAL - %d", errno); tdb = tdb_open_ex("mutex-openflags2.tdb", 0, TDB_CLEAR_IF_FIRST, O_RDWR|O_CREAT, 0755, &nolog_ctx, NULL); ok((tdb == NULL) && (errno == EINVAL), "TDB_CLEAR_IF_FIRST without " "TDB_MUTEX_LOCKING should fail with EINVAL - %d", errno); tdb = tdb_open_ex("mutex-openflags2.tdb", 0, TDB_CLEAR_IF_FIRST | TDB_MUTEX_LOCKING | TDB_INTERNAL, O_RDWR|O_CREAT, 0755, &nolog_ctx, NULL); ok((tdb == NULL) && (errno == EINVAL), "TDB_MUTEX_LOCKING with " "TDB_INTERNAL should fail with EINVAL - %d", errno); tdb = tdb_open_ex("mutex-openflags2.tdb", 0, TDB_CLEAR_IF_FIRST | TDB_MUTEX_LOCKING | TDB_NOMMAP, O_RDWR|O_CREAT, 0755, &nolog_ctx, NULL); ok((tdb == NULL) && (errno == EINVAL), "TDB_MUTEX_LOCKING with " "TDB_NOMMAP should fail with EINVAL - %d", errno); tdb = tdb_open_ex("mutex-openflags2.tdb", 0, TDB_CLEAR_IF_FIRST | TDB_MUTEX_LOCKING, O_RDONLY, 0755, &nolog_ctx, NULL); ok((tdb != NULL), "TDB_MUTEX_LOCKING with " "O_RDONLY should work - %d", errno); tdb_close(tdb); tdb = tdb_open_ex("mutex-openflags2.tdb", 0, TDB_CLEAR_IF_FIRST | TDB_MUTEX_LOCKING, O_RDWR|O_CREAT, 0755, &log_ctx, NULL); ok((tdb != NULL), "TDB_MUTEX_LOCKING with TDB_CLEAR_IF_FIRST" "TDB_NOMMAP should work - %d", errno); return 0; } /* The code should barf on TDBs created with rwlocks. */ int main(int argc, char *argv[]) { struct tdb_context *tdb; unsigned int log_count; struct tdb_logging_context log_ctx = { log_fn, &log_count }; struct tdb_logging_context nolog_ctx = { log_void, NULL }; int ret, status; pid_t child, wait_ret; int pipefd[2]; char c = 0; bool runtime_support; runtime_support = tdb_runtime_check_for_robust_mutexes(); ret = pipe(pipefd); ok1(ret == 0); key.dsize = strlen("hi"); key.dptr = discard_const_p(uint8_t, "hi"); data.dsize = strlen("world"); data.dptr = discard_const_p(uint8_t, "world"); if (!runtime_support) { tdb = tdb_open_ex("mutex-openflags2.tdb", 0, TDB_CLEAR_IF_FIRST| TDB_MUTEX_LOCKING, O_RDWR|O_CREAT, 0755, &nolog_ctx, NULL); ok((tdb == NULL) && (errno == ENOSYS), "TDB_MUTEX_LOCKING without " "runtime support should fail with ENOSYS - %d", errno); skip(1, "No robust mutex support"); return exit_status(); } child = fork(); if (child == 0) { return do_child(pipefd[0]); } tdb = tdb_open_ex("mutex-openflags2.tdb", 0, TDB_CLEAR_IF_FIRST| TDB_MUTEX_LOCKING, O_RDWR|O_CREAT, 0755, &log_ctx, NULL); ok((tdb != NULL), "tdb_open_ex with mutexes should succeed"); write(pipefd[1], &c, 1); wait_ret = wait(&status); ok((wait_ret == child) && (status == 0), "child should have exited correctly"); diag("done"); return exit_status(); } tdb-1.4.2/test/run-mutex-transaction1.c0000660000000000000000000001416512445751350017733 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include 
"tap-interface.h" #include #include #include #include static TDB_DATA key, data; static void log_fn(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...) { va_list ap; va_start(ap, fmt); vfprintf(stderr, fmt, ap); va_end(ap); } static int do_child(int tdb_flags, int to, int from) { struct tdb_context *tdb; unsigned int log_count; struct tdb_logging_context log_ctx = { log_fn, &log_count }; int ret; char c = 0; tdb = tdb_open_ex("mutex-transaction1.tdb", 3, tdb_flags, O_RDWR|O_CREAT, 0755, &log_ctx, NULL); ok(tdb, "tdb_open_ex should succeed"); ret = tdb_transaction_start(tdb); ok(ret == 0, "tdb_transaction_start should succeed"); ret = tdb_store(tdb, key, data, TDB_INSERT); ok(ret == 0, "tdb_store(tdb, key, data, TDB_INSERT) should succeed"); write(to, &c, sizeof(c)); read(from, &c, sizeof(c)); ret = tdb_transaction_cancel(tdb); ok(ret == 0, "tdb_transaction_cancel should succeed"); write(to, &c, sizeof(c)); read(from, &c, sizeof(c)); ret = tdb_transaction_start(tdb); ok(ret == 0, "tdb_transaction_start should succeed"); ret = tdb_store(tdb, key, data, TDB_INSERT); ok(ret == 0, "tdb_store(tdb, key, data, TDB_INSERT) should succeed"); write(to, &c, sizeof(c)); read(from, &c, sizeof(c)); ret = tdb_transaction_commit(tdb); ok(ret == 0, "tdb_transaction_commit should succeed"); write(to, &c, sizeof(c)); read(from, &c, sizeof(c)); ret = tdb_transaction_start(tdb); ok(ret == 0, "tdb_transaction_start should succeed"); ret = tdb_store(tdb, key, key, TDB_REPLACE); ok(ret == 0, "tdb_store(tdb, key, data, TDB_REPLACE) should succeed"); write(to, &c, sizeof(c)); read(from, &c, sizeof(c)); ret = tdb_transaction_commit(tdb); ok(ret == 0, "tdb_transaction_commit should succeed"); write(to, &c, sizeof(c)); read(from, &c, sizeof(c)); return 0; } /* The code should barf on TDBs created with rwlocks. */ int main(int argc, char *argv[]) { struct tdb_context *tdb; unsigned int log_count; struct tdb_logging_context log_ctx = { log_fn, &log_count }; int ret, status; pid_t child, wait_ret; int fromchild[2]; int tochild[2]; TDB_DATA val; char c; int tdb_flags; bool runtime_support; runtime_support = tdb_runtime_check_for_robust_mutexes(); if (!runtime_support) { skip(1, "No robust mutex support"); return exit_status(); } key.dsize = strlen("hi"); key.dptr = discard_const_p(uint8_t, "hi"); data.dsize = strlen("world"); data.dptr = discard_const_p(uint8_t, "world"); pipe(fromchild); pipe(tochild); tdb_flags = TDB_INCOMPATIBLE_HASH| TDB_MUTEX_LOCKING| TDB_CLEAR_IF_FIRST; child = fork(); if (child == 0) { close(fromchild[0]); close(tochild[1]); return do_child(tdb_flags, fromchild[1], tochild[0]); } close(fromchild[1]); close(tochild[0]); read(fromchild[0], &c, sizeof(c)); tdb = tdb_open_ex("mutex-transaction1.tdb", 0, tdb_flags, O_RDWR|O_CREAT, 0755, &log_ctx, NULL); ok(tdb, "tdb_open_ex should succeed"); /* * The child has the transaction running */ ret = tdb_transaction_start_nonblock(tdb); ok(ret == -1, "tdb_transaction_start_nonblock not succeed"); ret = tdb_chainlock_nonblock(tdb, key); ok(ret == -1, "tdb_chainlock_nonblock should not succeed"); /* * We can still read */ ret = tdb_exists(tdb, key); ok(ret == 0, "tdb_exists(tdb, key) should return 0"); val = tdb_fetch(tdb, key); ok(val.dsize == 0, "tdb_fetch(tdb, key) should return an empty value"); write(tochild[1], &c, sizeof(c)); /* * When the child canceled we can start... 
*/ ret = tdb_transaction_start(tdb); ok(ret == 0, "tdb_transaction_start should succeed"); read(fromchild[0], &c, sizeof(c)); write(tochild[1], &c, sizeof(c)); ret = tdb_transaction_cancel(tdb); ok(ret == 0, "tdb_transaction_cancel should succeed"); /* * When we canceled the child can start and store... */ read(fromchild[0], &c, sizeof(c)); /* * We still see the old values before the child commits... */ ret = tdb_exists(tdb, key); ok(ret == 0, "tdb_exists(tdb, key) should return 0"); val = tdb_fetch(tdb, key); ok(val.dsize == 0, "tdb_fetch(tdb, key) should return an empty value"); write(tochild[1], &c, sizeof(c)); read(fromchild[0], &c, sizeof(c)); /* * We see the new values after the commit... */ ret = tdb_exists(tdb, key); ok(ret == 1, "tdb_exists(tdb, key) should return 1"); val = tdb_fetch(tdb, key); ok(val.dsize != 0, "tdb_fetch(tdb, key) should return a value"); ok(val.dsize == data.dsize, "tdb_fetch(tdb, key) should return a value"); ok(memcmp(val.dptr, data.dptr, data.dsize) == 0, "tdb_fetch(tdb, key) should return a value"); write(tochild[1], &c, sizeof(c)); read(fromchild[0], &c, sizeof(c)); /* * The child started a new transaction and replaces the value, * but we still see the old values before the child commits... */ ret = tdb_exists(tdb, key); ok(ret == 1, "tdb_exists(tdb, key) should return 1"); val = tdb_fetch(tdb, key); ok(val.dsize != 0, "tdb_fetch(tdb, key) should return a value"); ok(val.dsize == data.dsize, "tdb_fetch(tdb, key) should return a value"); ok(memcmp(val.dptr, data.dptr, data.dsize) == 0, "tdb_fetch(tdb, key) should return a value"); write(tochild[1], &c, sizeof(c)); read(fromchild[0], &c, sizeof(c)); /* * We see the new values after the commit... */ ret = tdb_exists(tdb, key); ok(ret == 1, "tdb_exists(tdb, key) should return 1"); val = tdb_fetch(tdb, key); ok(val.dsize != 0, "tdb_fetch(tdb, key) should return a value"); ok(val.dsize == key.dsize, "tdb_fetch(tdb, key) should return a value"); ok(memcmp(val.dptr, key.dptr, key.dsize) == 0, "tdb_fetch(tdb, key) should return a value"); write(tochild[1], &c, sizeof(c)); wait_ret = wait(&status); ok(wait_ret == child, "child should have exited correctly"); diag("done"); return exit_status(); } tdb-1.4.2/test/run-mutex-trylock.c0000660000000000000000000000546112406075657017021 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include #include #include static TDB_DATA key, data; static void log_fn(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...) 
{ va_list ap; va_start(ap, fmt); vfprintf(stderr, fmt, ap); va_end(ap); } static int do_child(int tdb_flags, int to, int from) { struct tdb_context *tdb; unsigned int log_count; struct tdb_logging_context log_ctx = { log_fn, &log_count }; int ret; char c = 0; tdb = tdb_open_ex("mutex-trylock.tdb", 0, tdb_flags, O_RDWR|O_CREAT, 0755, &log_ctx, NULL); ok(tdb, "tdb_open_ex should succeed"); ret = tdb_chainlock(tdb, key); ok(ret == 0, "tdb_chainlock should succeed"); write(to, &c, sizeof(c)); read(from, &c, sizeof(c)); ret = tdb_chainunlock(tdb, key); ok(ret == 0, "tdb_chainunlock should succeed"); write(to, &c, sizeof(c)); return 0; } /* The code should barf on TDBs created with rwlocks. */ int main(int argc, char *argv[]) { struct tdb_context *tdb; unsigned int log_count; struct tdb_logging_context log_ctx = { log_fn, &log_count }; int ret, status; pid_t child, wait_ret; int fromchild[2]; int tochild[2]; char c; int tdb_flags; bool runtime_support; runtime_support = tdb_runtime_check_for_robust_mutexes(); if (!runtime_support) { skip(1, "No robust mutex support"); return exit_status(); } key.dsize = strlen("hi"); key.dptr = discard_const_p(uint8_t, "hi"); data.dsize = strlen("world"); data.dptr = discard_const_p(uint8_t, "world"); pipe(fromchild); pipe(tochild); tdb_flags = TDB_INCOMPATIBLE_HASH| TDB_MUTEX_LOCKING| TDB_CLEAR_IF_FIRST; child = fork(); if (child == 0) { close(fromchild[0]); close(tochild[1]); return do_child(tdb_flags, fromchild[1], tochild[0]); } close(fromchild[1]); close(tochild[0]); read(fromchild[0], &c, sizeof(c)); tdb = tdb_open_ex("mutex-trylock.tdb", 0, tdb_flags, O_RDWR|O_CREAT, 0755, &log_ctx, NULL); ok(tdb, "tdb_open_ex should succeed"); ret = tdb_chainlock_nonblock(tdb, key); ok(ret == -1, "tdb_chainlock_nonblock should not succeed"); write(tochild[1], &c, sizeof(c)); read(fromchild[0], &c, sizeof(c)); ret = tdb_chainlock_nonblock(tdb, key); ok(ret == 0, "tdb_chainlock_nonblock should succeed"); ret = tdb_chainunlock(tdb, key); ok(ret == 0, "tdb_chainunlock should succeed"); wait_ret = wait(&status); ok(wait_ret == child, "child should have exited correctly"); diag("done"); return exit_status(); } tdb-1.4.2/test/run-mutex1.c0000660000000000000000000000642712406075657015420 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include #include #include static TDB_DATA key, data; static void log_fn(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...) 
{ va_list ap; va_start(ap, fmt); vfprintf(stderr, fmt, ap); va_end(ap); } static int do_child(int tdb_flags, int to, int from) { struct tdb_context *tdb; unsigned int log_count; struct tdb_logging_context log_ctx = { log_fn, &log_count }; int ret; char c = 0; tdb = tdb_open_ex("mutex1.tdb", 0, tdb_flags, O_RDWR|O_CREAT, 0755, &log_ctx, NULL); ok(tdb, "tdb_open_ex should succeed"); ret = tdb_chainlock(tdb, key); ok(ret == 0, "tdb_chainlock should succeed"); write(to, &c, sizeof(c)); read(from, &c, sizeof(c)); ret = tdb_chainunlock(tdb, key); ok(ret == 0, "tdb_chainunlock should succeed"); write(to, &c, sizeof(c)); read(from, &c, sizeof(c)); ret = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false); ok(ret == 0, "tdb_allrecord_lock should succeed"); write(to, &c, sizeof(c)); read(from, &c, sizeof(c)); ret = tdb_allrecord_unlock(tdb, F_WRLCK, false); ok(ret == 0, "tdb_allrecord_unlock should succeed"); return 0; } /* The code should barf on TDBs created with rwlocks. */ int main(int argc, char *argv[]) { struct tdb_context *tdb; unsigned int log_count; struct tdb_logging_context log_ctx = { log_fn, &log_count }; int ret, status; pid_t child, wait_ret; int fromchild[2]; int tochild[2]; char c; int tdb_flags; bool runtime_support; runtime_support = tdb_runtime_check_for_robust_mutexes(); if (!runtime_support) { skip(1, "No robust mutex support"); return exit_status(); } key.dsize = strlen("hi"); key.dptr = discard_const_p(uint8_t, "hi"); data.dsize = strlen("world"); data.dptr = discard_const_p(uint8_t, "world"); pipe(fromchild); pipe(tochild); tdb_flags = TDB_INCOMPATIBLE_HASH| TDB_MUTEX_LOCKING| TDB_CLEAR_IF_FIRST; child = fork(); if (child == 0) { close(fromchild[0]); close(tochild[1]); return do_child(tdb_flags, fromchild[1], tochild[0]); } close(fromchild[1]); close(tochild[0]); read(fromchild[0], &c, sizeof(c)); tdb = tdb_open_ex("mutex1.tdb", 0, tdb_flags, O_RDWR|O_CREAT, 0755, &log_ctx, NULL); ok(tdb, "tdb_open_ex should succeed"); write(tochild[1], &c, sizeof(c)); read(fromchild[0], &c, sizeof(c)); ret = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false); ok(ret == 0, "tdb_allrecord_lock should succeed"); ret = tdb_store(tdb, key, data, 0); ok(ret == 0, "tdb_store should succeed"); ret = tdb_allrecord_unlock(tdb, F_WRLCK, false); ok(ret == 0, "tdb_allrecord_unlock should succeed"); write(tochild[1], &c, sizeof(c)); read(fromchild[0], &c, sizeof(c)); write(tochild[1], &c, sizeof(c)); ret = tdb_delete(tdb, key); ok(ret == 0, "tdb_delete should succeed"); wait_ret = wait(&status); ok(wait_ret == child, "child should have exited correctly"); diag("done"); return exit_status(); } tdb-1.4.2/test/run-nested-transactions.c0000660000000000000000000000435712406075657020163 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include #include "logging.h" int main(int argc, char *argv[]) { struct tdb_context *tdb; TDB_DATA key, data; plan_tests(27); key.dsize = strlen("hi"); key.dptr = discard_const_p(uint8_t, "hi"); tdb = tdb_open_ex("run-nested-transactions.tdb", 1024, TDB_CLEAR_IF_FIRST|TDB_DISALLOW_NESTING, O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL); ok1(tdb); /* Nesting disallowed.
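For reference, the all-records lock that run-mutex1.c exercises above is also reachable through the public tdb_lockall()/tdb_unlockall() wrappers. A minimal sketch (the helper name is hypothetical):

#include <tdb.h>

// Sketch: freeze the whole database around a multi-record update.
// tdb_lockall() is the public entry point for the all-records lock.
static int locked_update(struct tdb_context *tdb, TDB_DATA key, TDB_DATA value)
{
	int ret;

	if (tdb_lockall(tdb) != 0)
		return -1;
	ret = tdb_store(tdb, key, value, TDB_REPLACE);
	tdb_unlockall(tdb);
	return ret;
}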
*/ ok1(tdb_transaction_start(tdb) == 0); data.dptr = discard_const_p(uint8_t, "world"); data.dsize = strlen("world"); ok1(tdb_store(tdb, key, data, TDB_INSERT) == 0); data = tdb_fetch(tdb, key); ok1(data.dsize == strlen("world")); ok1(memcmp(data.dptr, "world", strlen("world")) == 0); free(data.dptr); ok1(tdb_transaction_start(tdb) != 0); ok1(tdb_error(tdb) == TDB_ERR_NESTING); data = tdb_fetch(tdb, key); ok1(data.dsize == strlen("world")); ok1(memcmp(data.dptr, "world", strlen("world")) == 0); free(data.dptr); ok1(tdb_transaction_commit(tdb) == 0); data = tdb_fetch(tdb, key); ok1(data.dsize == strlen("world")); ok1(memcmp(data.dptr, "world", strlen("world")) == 0); free(data.dptr); tdb_close(tdb); /* Nesting allowed by default */ tdb = tdb_open_ex("run-nested-transactions.tdb", 1024, TDB_DEFAULT, O_RDWR, 0, &taplogctx, NULL); ok1(tdb); ok1(tdb_transaction_start(tdb) == 0); ok1(tdb_transaction_start(tdb) == 0); ok1(tdb_delete(tdb, key) == 0); ok1(tdb_transaction_commit(tdb) == 0); ok1(!tdb_exists(tdb, key)); ok1(tdb_transaction_cancel(tdb) == 0); /* Surprise! Kills inner "committed" transaction. */ ok1(tdb_exists(tdb, key)); ok1(tdb_transaction_start(tdb) == 0); ok1(tdb_transaction_start(tdb) == 0); ok1(tdb_delete(tdb, key) == 0); ok1(tdb_transaction_commit(tdb) == 0); ok1(!tdb_exists(tdb, key)); ok1(tdb_transaction_commit(tdb) == 0); ok1(!tdb_exists(tdb, key)); tdb_close(tdb); return exit_status(); } tdb-1.4.2/test/run-nested-traverse.c0000660000000000000000000000600613126252766017277 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "lock-tracking.h" #define fcntl fcntl_with_lockcheck #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #undef fcntl #include #include #include "external-agent.h" #include "logging.h" static struct agent *agent; static bool correct_key(TDB_DATA key) { return key.dsize == strlen("hi") && memcmp(key.dptr, "hi", key.dsize) == 0; } static bool correct_data(TDB_DATA data) { return data.dsize == strlen("world") && memcmp(data.dptr, "world", data.dsize) == 0; } static int traverse2(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data, void *p) { ok1(correct_key(key)); ok1(correct_data(data)); return 0; } static int traverse1r(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data, void *p) { ok1(correct_key(key)); ok1(correct_data(data)); ok1(external_agent_operation(agent, TRANSACTION_START, tdb_name(tdb)) == SUCCESS); ok1(external_agent_operation(agent, STORE, tdb_name(tdb)) == SUCCESS); ok1(external_agent_operation(agent, TRANSACTION_COMMIT, tdb_name(tdb)) == WOULD_HAVE_BLOCKED); tdb_traverse(tdb, traverse2, NULL); /* That should *not* release the all-records lock! */ ok1(external_agent_operation(agent, TRANSACTION_START, tdb_name(tdb)) == SUCCESS); ok1(external_agent_operation(agent, STORE, tdb_name(tdb)) == SUCCESS); ok1(external_agent_operation(agent, TRANSACTION_COMMIT, tdb_name(tdb)) == WOULD_HAVE_BLOCKED); return 0; } static int traverse1w(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data, void *p) { ok1(correct_key(key)); ok1(correct_data(data)); ok1(external_agent_operation(agent, TRANSACTION_START, tdb_name(tdb)) == WOULD_HAVE_BLOCKED); tdb_traverse(tdb, traverse2, NULL); /* That should *not* release the all-records lock! 
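run-nested-transactions.c above checks both modes: with TDB_DISALLOW_NESTING the second tdb_transaction_start() fails and tdb_error() reports TDB_ERR_NESTING, while the default quietly nests (and, as the test notes, an outer cancel discards an inner "committed" transaction). A sketch of detecting the former (helper name illustrative):

#include <tdb.h>
#include <stdio.h>

// Sketch: spot the nesting error the test above provokes under
// TDB_DISALLOW_NESTING.
static int start_transaction_checked(struct tdb_context *tdb)
{
	if (tdb_transaction_start(tdb) != 0) {
		if (tdb_error(tdb) == TDB_ERR_NESTING)
			fprintf(stderr, "already inside a transaction\n");
		return -1;
	}
	return 0;
}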
*/ ok1(external_agent_operation(agent, TRANSACTION_START, tdb_name(tdb)) == WOULD_HAVE_BLOCKED); return 0; } int main(int argc, char *argv[]) { struct tdb_context *tdb; TDB_DATA key, data; plan_tests(17); agent = prepare_external_agent(); tdb = tdb_open_ex("run-nested-traverse.tdb", 1024, TDB_CLEAR_IF_FIRST, O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL); ok1(tdb); ok1(external_agent_operation(agent, OPEN, tdb_name(tdb)) == SUCCESS); ok1(external_agent_operation(agent, TRANSACTION_START, tdb_name(tdb)) == SUCCESS); ok1(external_agent_operation(agent, TRANSACTION_COMMIT, tdb_name(tdb)) == SUCCESS); key.dsize = strlen("hi"); key.dptr = discard_const_p(uint8_t, "hi"); data.dptr = discard_const_p(uint8_t, "world"); data.dsize = strlen("world"); ok1(tdb_store(tdb, key, data, TDB_INSERT) == 0); tdb_traverse(tdb, traverse1w, NULL); tdb_traverse_read(tdb, traverse1r, NULL); tdb_close(tdb); return exit_status(); } tdb-1.4.2/test/run-no-lock-during-traverse.c0000660000000000000000000000467112406075657020655 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "lock-tracking.h" #define fcntl fcntl_with_lockcheck #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include "logging.h" #undef fcntl #define NUM_ENTRIES 10 static bool prepare_entries(struct tdb_context *tdb) { unsigned int i; TDB_DATA key, data; for (i = 0; i < NUM_ENTRIES; i++) { key.dsize = sizeof(i); key.dptr = (void *)&i; data.dsize = strlen("world"); data.dptr = discard_const_p(uint8_t, "world"); if (tdb_store(tdb, key, data, 0) != 0) return false; } return true; } static void delete_entries(struct tdb_context *tdb) { unsigned int i; TDB_DATA key; for (i = 0; i < NUM_ENTRIES; i++) { key.dsize = sizeof(i); key.dptr = (void *)&i; ok1(tdb_delete(tdb, key) == 0); } } /* We don't know how many times this will run. 
*/ static int delete_other(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data, void *private_data) { unsigned int i; memcpy(&i, key.dptr, 4); i = (i + 1) % NUM_ENTRIES; key.dptr = (void *)&i; if (tdb_delete(tdb, key) != 0) (*(int *)private_data)++; return 0; } static int delete_self(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data, void *private_data) { ok1(tdb_delete(tdb, key) == 0); return 0; } int main(int argc, char *argv[]) { struct tdb_context *tdb; int errors = 0; plan_tests(41); tdb = tdb_open_ex("run-no-lock-during-traverse.tdb", 1024, TDB_CLEAR_IF_FIRST, O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL); ok1(tdb); ok1(prepare_entries(tdb)); ok1(locking_errors == 0); ok1(tdb_lockall(tdb) == 0); ok1(locking_errors == 0); tdb_traverse(tdb, delete_other, &errors); ok1(errors == 0); ok1(locking_errors == 0); ok1(tdb_unlockall(tdb) == 0); ok1(prepare_entries(tdb)); ok1(locking_errors == 0); ok1(tdb_lockall(tdb) == 0); ok1(locking_errors == 0); tdb_traverse(tdb, delete_self, NULL); ok1(locking_errors == 0); ok1(tdb_unlockall(tdb) == 0); ok1(prepare_entries(tdb)); ok1(locking_errors == 0); ok1(tdb_lockall(tdb) == 0); ok1(locking_errors == 0); delete_entries(tdb); ok1(locking_errors == 0); ok1(tdb_unlockall(tdb) == 0); ok1(tdb_close(tdb) == 0); return exit_status(); } tdb-1.4.2/test/run-oldhash.c0000660000000000000000000000242512406075657015611 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include "logging.h" int main(int argc, char *argv[]) { struct tdb_context *tdb; plan_tests(8); /* Old format (with zeroes in the hash magic fields) should * open with any hash (since we don't know what hash they used). 
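The delete_self callback above relies on exactly the guarantee this test verifies: a traverse callback may delete the record it is currently visiting without tripping over its own chain lock. A minimal sketch of that shape (callback name illustrative):

#include <tdb.h>

// Sketch of the delete_self pattern: delete the record being visited.
// A non-zero return from the callback aborts the traverse.
static int wipe_cb(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
                   void *private_data)
{
	return tdb_delete(tdb, key);
}

// Usage sketch: tdb_traverse(tdb, wipe_cb, NULL);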
*/ tdb = tdb_open_ex("test/old-nohash-le.tdb", 0, 0, O_RDWR, 0, &taplogctx, NULL); ok1(tdb); ok1(tdb_check(tdb, NULL, NULL) == 0); tdb_close(tdb); tdb = tdb_open_ex("test/old-nohash-be.tdb", 0, 0, O_RDWR, 0, &taplogctx, NULL); ok1(tdb); ok1(tdb_check(tdb, NULL, NULL) == 0); tdb_close(tdb); tdb = tdb_open_ex("test/old-nohash-le.tdb", 0, 0, O_RDWR, 0, &taplogctx, tdb_jenkins_hash); ok1(tdb); ok1(tdb_check(tdb, NULL, NULL) == 0); tdb_close(tdb); tdb = tdb_open_ex("test/old-nohash-be.tdb", 0, 0, O_RDWR, 0, &taplogctx, tdb_jenkins_hash); ok1(tdb); ok1(tdb_check(tdb, NULL, NULL) == 0); tdb_close(tdb); return exit_status(); } tdb-1.4.2/test/run-open-during-transaction.c0000660000000000000000000001012412520121120020702 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "lock-tracking.h" static ssize_t pwrite_check(int fd, const void *buf, size_t count, off_t offset); static ssize_t write_check(int fd, const void *buf, size_t count); static int ftruncate_check(int fd, off_t length); #define pwrite pwrite_check #define write write_check #define fcntl fcntl_with_lockcheck #define ftruncate ftruncate_check #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include #include #include "external-agent.h" #include "logging.h" static struct agent *agent; static bool opened; static int errors = 0; static bool clear_if_first; #define TEST_DBNAME "run-open-during-transaction.tdb" #undef write #undef pwrite #undef fcntl #undef ftruncate static bool is_same(const char *snapshot, const char *latest, off_t len) { unsigned i; for (i = 0; i < len; i++) { if (snapshot[i] != latest[i]) return false; } return true; } static bool compare_file(int fd, const char *snapshot, off_t snapshot_len) { char *contents; bool same; /* over-length read serves as length check. */ contents = malloc(snapshot_len+1); same = pread(fd, contents, snapshot_len+1, 0) == snapshot_len && is_same(snapshot, contents, snapshot_len); free(contents); return same; } static void check_file_intact(int fd) { enum agent_return ret; struct stat st; char *contents; fstat(fd, &st); contents = malloc(st.st_size); if (pread(fd, contents, st.st_size, 0) != st.st_size) { diag("Read fail"); errors++; free(contents); return; } /* Ask agent to open file. */ ret = external_agent_operation(agent, clear_if_first ? OPEN_WITH_CLEAR_IF_FIRST : OPEN, TEST_DBNAME); /* It's OK to open it, but it must not have changed! 
*/ if (!compare_file(fd, contents, st.st_size)) { diag("Agent changed file after opening %s", agent_return_name(ret)); errors++; } if (ret == SUCCESS) { ret = external_agent_operation(agent, CLOSE, NULL); if (ret != SUCCESS) { diag("Agent failed to close tdb: %s", agent_return_name(ret)); errors++; } } else if (ret != WOULD_HAVE_BLOCKED) { diag("Agent opening file gave %s", agent_return_name(ret)); errors++; } free(contents); } static void after_unlock(int fd) { if (opened) check_file_intact(fd); } static ssize_t pwrite_check(int fd, const void *buf, size_t count, off_t offset) { if (opened) check_file_intact(fd); return pwrite(fd, buf, count, offset); } static ssize_t write_check(int fd, const void *buf, size_t count) { if (opened) check_file_intact(fd); return write(fd, buf, count); } static int ftruncate_check(int fd, off_t length) { if (opened) check_file_intact(fd); return ftruncate(fd, length); } int main(int argc, char *argv[]) { const int flags[] = { TDB_DEFAULT, TDB_CLEAR_IF_FIRST, TDB_NOMMAP, TDB_CLEAR_IF_FIRST | TDB_NOMMAP }; int i; struct tdb_context *tdb; TDB_DATA key, data; plan_tests(20); agent = prepare_external_agent(); unlock_callback = after_unlock; for (i = 0; i < sizeof(flags)/sizeof(flags[0]); i++) { clear_if_first = (flags[i] & TDB_CLEAR_IF_FIRST); diag("Test with %s and %s", clear_if_first ? "CLEAR" : "DEFAULT", (flags[i] & TDB_NOMMAP) ? "no mmap" : "mmap"); unlink(TEST_DBNAME); tdb = tdb_open_ex(TEST_DBNAME, 1024, flags[i], O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL); ok1(tdb); opened = true; ok1(tdb_transaction_start(tdb) == 0); key.dsize = strlen("hi"); key.dptr = discard_const_p(uint8_t, "hi"); data.dptr = discard_const_p(uint8_t, "world"); data.dsize = strlen("world"); ok1(tdb_store(tdb, key, data, TDB_INSERT) == 0); ok1(tdb_transaction_commit(tdb) == 0); ok(!errors, "We had %u open errors", errors); opened = false; tdb_close(tdb); } return exit_status(); } tdb-1.4.2/test/run-rdlock-upgrade.c0000660000000000000000000000760413100601766017062 0ustar rootroot00000000000000#include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include #include #include #include "logging.h" static TDB_DATA key, data; static void do_chainlock(const char *name, int tdb_flags, int up, int down) { struct tdb_context *tdb; int ret; ssize_t nread, nwritten; char c = 0; tdb = tdb_open_ex(name, 3, tdb_flags, O_RDWR|O_CREAT, 0755, &taplogctx, NULL); ok(tdb, "tdb_open_ex should succeed"); ret = tdb_chainlock_read(tdb, key); ok(ret == 0, "tdb_chainlock_read should succeed"); nwritten = write(up, &c, sizeof(c)); ok(nwritten == sizeof(c), "write should succeed"); nread = read(down, &c, sizeof(c)); ok(nread == 0, "read should succeed"); exit(0); } static void do_trylock(const char *name, int tdb_flags, int up, int down) { struct tdb_context *tdb; int ret; ssize_t nread, nwritten; char c = 0; tdb = tdb_open_ex(name, 3, tdb_flags, O_RDWR|O_CREAT, 0755, &taplogctx, NULL); ok(tdb, "tdb_open_ex should succeed"); /* * tdb used to have a bug where with fcntl locks an upgrade * from a readlock to writelock did not check for the * underlying fcntl lock. Mutexes don't distinguish between * readlocks and writelocks, so that bug does not apply here. 
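The pwrite_check/write_check/ftruncate_check shims above use a preprocessor interposition trick: #define the libc name to a checked wrapper before including the code under test, then #undef it so the wrapper itself can reach the real function. A self-contained miniature of the same trick; everything here is illustrative:

#include <stdio.h>
#include <unistd.h>

static ssize_t write_check(int fd, const void *buf, size_t count);
#define write write_check

// "Code under test" -- in the real files this is an #include of ../common/*.c.
static int say_hello(void)
{
	return write(1, "hello\n", 6) == 6 ? 0 : 1;
}

#undef write

static unsigned int write_calls;

static ssize_t write_check(int fd, const void *buf, size_t count)
{
	write_calls++;                 // invariant checks would go here
	return write(fd, buf, count);  // after #undef this is the real write
}

int main(void)
{
	int ret = say_hello();
	printf("write intercepted %u time(s)\n", write_calls);
	return ret;
}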
*/ ret = tdb_chainlock_read(tdb, key); ok(ret == 0, "tdb_chainlock_read should succeed"); ret = tdb_chainlock_nonblock(tdb, key); ok(ret == -1, "tdb_chainlock_nonblock should fail"); nwritten = write(up, &c, sizeof(c)); ok(nwritten == sizeof(c), "write should succeed"); nread = read(down, &c, sizeof(c)); ok(nread == 0, "read should succeed"); exit(0); } static int do_tests(const char *name, int tdb_flags) { int ret; pid_t chainlock_child, store_child; int chainlock_down[2]; int chainlock_up[2]; int store_down[2]; int store_up[2]; char c; ssize_t nread; key.dsize = strlen("hi"); key.dptr = discard_const_p(uint8_t, "hi"); data.dsize = strlen("world"); data.dptr = discard_const_p(uint8_t, "world"); ret = pipe(chainlock_down); ok(ret == 0, "pipe should succeed"); ret = pipe(chainlock_up); ok(ret == 0, "pipe should succeed"); ret = pipe(store_down); ok(ret == 0, "pipe should succeed"); ret = pipe(store_up); ok(ret == 0, "pipe should succeed"); chainlock_child = fork(); ok(chainlock_child != -1, "fork should succeed"); if (chainlock_child == 0) { close(chainlock_up[0]); close(chainlock_down[1]); close(store_up[0]); close(store_up[1]); close(store_down[0]); close(store_down[1]); do_chainlock(name, tdb_flags, chainlock_up[1], chainlock_down[0]); exit(0); } close(chainlock_up[1]); close(chainlock_down[0]); nread = read(chainlock_up[0], &c, sizeof(c)); ok(nread == sizeof(c), "read should succeed"); /* * Now we have a process holding a chain read lock. Start * another process trying to write lock. This should fail. */ store_child = fork(); ok(store_child != -1, "fork should succeed"); if (store_child == 0) { close(chainlock_up[0]); close(chainlock_down[1]); close(store_up[0]); close(store_down[1]); do_trylock(name, tdb_flags, store_up[1], store_down[0]); exit(0); } close(store_up[1]); close(store_down[0]); nread = read(store_up[0], &c, sizeof(c)); ok(nread == sizeof(c), "read should succeed"); close(chainlock_up[0]); close(chainlock_down[1]); close(store_up[0]); close(store_down[1]); diag("%s tests done", name); return exit_status(); } int main(int argc, char *argv[]) { int ret; ret = do_tests("rdlock-upgrade.tdb", TDB_CLEAR_IF_FIRST | TDB_INCOMPATIBLE_HASH); ok(ret == 0, "rdlock-upgrade.tdb tests should succeed"); return exit_status(); } tdb-1.4.2/test/run-readonly-check.c0000660000000000000000000000275612406075657017066 0ustar rootroot00000000000000/* We should be able to tdb_check a O_RDONLY tdb, and we were previously allowed * to tdb_check() inside a transaction (though that's paranoia!). */ #include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include "logging.h" int main(int argc, char *argv[]) { struct tdb_context *tdb; TDB_DATA key, data; plan_tests(11); tdb = tdb_open_ex("run-readonly-check.tdb", 1024, TDB_DEFAULT, O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL); ok1(tdb); key.dsize = strlen("hi"); key.dptr = discard_const_p(uint8_t, "hi"); data.dsize = strlen("world"); data.dptr = discard_const_p(uint8_t, "world"); ok1(tdb_store(tdb, key, data, TDB_INSERT) == 0); ok1(tdb_check(tdb, NULL, NULL) == 0); /* We are also allowed to do a check inside a transaction. 
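do_tests() above coordinates its children purely with pipe bytes: one byte up means "lock taken", one byte (or EOF) down means "you may release and exit". Stripped of the tdb calls, the skeleton looks roughly like this; error handling is elided in this sketch:

#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
	int up[2], down[2];
	char c = 0;

	if (pipe(up) != 0 || pipe(down) != 0)
		return 1;
	if (fork() == 0) {
		// child: would take a chainlock here, then signal the parent
		write(up[1], &c, 1);
		read(down[0], &c, 1);  // block until the parent says go
		_exit(0);
	}
	read(up[0], &c, 1);            // lock is now held by the child
	// parent: would assert that a nonblocking lock attempt fails here
	write(down[1], &c, 1);
	wait(NULL);
	return 0;
}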
*/ ok1(tdb_transaction_start(tdb) == 0); ok1(tdb_check(tdb, NULL, NULL) == 0); ok1(tdb_close(tdb) == 0); tdb = tdb_open_ex("run-readonly-check.tdb", 1024, TDB_DEFAULT, O_RDONLY, 0, &taplogctx, NULL); ok1(tdb); ok1(tdb_store(tdb, key, data, TDB_MODIFY) == -1); ok1(tdb_error(tdb) == TDB_ERR_RDONLY); ok1(tdb_check(tdb, NULL, NULL) == 0); ok1(tdb_close(tdb) == 0); return exit_status(); } tdb-1.4.2/test/run-rescue-find_entry.c0000660000000000000000000000215012406075657017607 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/rescue.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include "logging.h" #define NUM 20 /* Binary searches are deceptively simple: easy to screw up! */ int main(int argc, char *argv[]) { unsigned int i, j, n; struct found f[NUM+1]; struct found_table table; /* Set up array for searching. */ for (i = 0; i < NUM+1; i++) { f[i].head = i * 3; } table.arr = f; for (i = 0; i < NUM; i++) { table.num = i; for (j = 0; j < (i + 2) * 3; j++) { n = find_entry(&table, j); ok1(n <= i); /* If we were searching for something too large... */ if (j > i*3) ok1(n == i); else { /* It must give us something after j */ ok1(f[n].head >= j); ok1(n == 0 || f[n-1].head < j); } } } return exit_status(); } tdb-1.4.2/test/run-rescue.c0000660000000000000000000000566012406075657015461 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/rescue.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include "logging.h" struct walk_data { TDB_DATA key; TDB_DATA data; bool fail; unsigned count; }; static inline bool tdb_deq(TDB_DATA a, TDB_DATA b) { return a.dsize == b.dsize && memcmp(a.dptr, b.dptr, a.dsize) == 0; } static inline TDB_DATA tdb_mkdata(const void *p, size_t len) { TDB_DATA d; d.dptr = discard_const_p(uint8_t, p); d.dsize = len; return d; } static void walk(TDB_DATA key, TDB_DATA data, void *_wd) { struct walk_data *wd = _wd; if (!tdb_deq(key, wd->key)) { wd->fail = true; } if (!tdb_deq(data, wd->data)) { wd->fail = true; } wd->count++; } static void count_records(TDB_DATA key, TDB_DATA data, void *_wd) { struct walk_data *wd = _wd; if (!tdb_deq(key, wd->key) || !tdb_deq(data, wd->data)) diag("%.*s::%.*s", (int)key.dsize, key.dptr, (int)data.dsize, data.dptr); wd->count++; } static void log_fn(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...) 
{ unsigned int *count = tdb_get_logging_private(tdb); (*count)++; } int main(int argc, char *argv[]) { struct tdb_context *tdb; struct walk_data wd; unsigned int i, size, log_count = 0; struct tdb_logging_context log_ctx = { log_fn, &log_count }; plan_tests(8); tdb = tdb_open_ex("run-rescue.tdb", 1, TDB_CLEAR_IF_FIRST, O_CREAT|O_TRUNC|O_RDWR, 0600, &log_ctx, NULL); wd.key.dsize = strlen("hi"); wd.key.dptr = discard_const_p(uint8_t, "hi"); wd.data.dsize = strlen("world"); wd.data.dptr = discard_const_p(uint8_t, "world"); wd.count = 0; wd.fail = false; ok1(tdb_store(tdb, wd.key, wd.data, TDB_INSERT) == 0); ok1(tdb_rescue(tdb, walk, &wd) == 0); ok1(!wd.fail); ok1(wd.count == 1); /* Corrupt the database, walk should either get it or not. */ size = tdb->map_size; for (i = sizeof(struct tdb_header); i < size; i++) { char c; if (tdb->methods->tdb_read(tdb, i, &c, 1, false) != 0) fail("Reading offset %i", i); if (tdb->methods->tdb_write(tdb, i, "X", 1) != 0) fail("Writing X at offset %i", i); wd.count = 0; if (tdb_rescue(tdb, count_records, &wd) != 0) { wd.fail = true; break; } /* Could be 0 or 1. */ if (wd.count > 1) { wd.fail = true; break; } if (tdb->methods->tdb_write(tdb, i, &c, 1) != 0) fail("Restoring offset %i", i); } ok1(log_count == 0); ok1(!wd.fail); tdb_close(tdb); /* Now try our known-corrupt db. */ tdb = tdb_open_ex("test/tdb.corrupt", 1024, 0, O_RDWR, 0, &taplogctx, NULL); wd.count = 0; ok1(tdb_rescue(tdb, count_records, &wd) == 0); ok1(wd.count == 1627); tdb_close(tdb); return exit_status(); } tdb-1.4.2/test/run-rwlock-check.c0000660000000000000000000000223312406075657016540 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include static void log_fn(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...) { unsigned int *count = tdb_get_logging_private(tdb); if (strstr(fmt, "spinlocks")) (*count)++; } /* The code should barf on TDBs created with rwlocks. */ int main(int argc, char *argv[]) { struct tdb_context *tdb; unsigned int log_count; struct tdb_logging_context log_ctx = { log_fn, &log_count }; plan_tests(4); /* We should fail to open rwlock-using tdbs of either endian. 
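run-rescue.c above feeds every salvageable record to a walker, even from a corrupt file. A sketch of such a walker, assuming the public tdb_rescue() entry point and an illustrative callback name:

#include <tdb.h>
#include <stdio.h>

// Sketch: a tdb_rescue() walker in the style of count_records() above.
// It is handed every key/value pair that can still be salvaged.
static void dump_record(TDB_DATA key, TDB_DATA data, void *private_data)
{
	unsigned int *count = private_data;

	(*count)++;
	printf("%.*s (%zu data bytes)\n",
	       (int)key.dsize, (const char *)key.dptr, (size_t)data.dsize);
}

// Usage sketch: unsigned int n = 0; tdb_rescue(tdb, dump_record, &n);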
*/ log_count = 0; tdb = tdb_open_ex("test/rwlock-le.tdb", 0, 0, O_RDWR, 0, &log_ctx, NULL); ok1(!tdb); ok1(log_count == 1); log_count = 0; tdb = tdb_open_ex("test/rwlock-be.tdb", 0, 0, O_RDWR, 0, &log_ctx, NULL); ok1(!tdb); ok1(log_count == 1); return exit_status(); } tdb-1.4.2/test/run-summary.c0000660000000000000000000000421612406075657015664 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/summary.c" #include "../common/mutex.c" #include "tap-interface.h" #include int main(int argc, char *argv[]) { unsigned int i, j; struct tdb_context *tdb; int flags[] = { TDB_INTERNAL, TDB_DEFAULT, TDB_NOMMAP, TDB_INTERNAL|TDB_CONVERT, TDB_CONVERT, TDB_NOMMAP|TDB_CONVERT }; TDB_DATA key = { (unsigned char *)&j, sizeof(j) }; TDB_DATA data = { (unsigned char *)&j, sizeof(j) }; char *summary; plan_tests(sizeof(flags) / sizeof(flags[0]) * 14); for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { tdb = tdb_open("run-summary.tdb", 131, flags[i], O_RDWR|O_CREAT|O_TRUNC, 0600); ok1(tdb); if (!tdb) continue; /* Put some stuff in there. */ for (j = 0; j < 500; j++) { /* Make sure padding varies to we get some graphs! */ data.dsize = j % (sizeof(j) + 1); if (tdb_store(tdb, key, data, TDB_REPLACE) != 0) fail("Storing in tdb"); } summary = tdb_summary(tdb); diag("%s", summary); ok1(strstr(summary, "Size of file/data: ")); ok1(strstr(summary, "Number of records: 500\n")); ok1(strstr(summary, "Smallest/average/largest keys: 4/4/4\n")); ok1(strstr(summary, "Smallest/average/largest data: 0/2/4\n")); ok1(strstr(summary, "Smallest/average/largest padding: ")); ok1(strstr(summary, "Number of dead records: 0\n")); ok1(strstr(summary, "Number of free records: 1\n")); ok1(strstr(summary, "Smallest/average/largest free records: ")); ok1(strstr(summary, "Number of hash chains: 131\n")); ok1(strstr(summary, "Smallest/average/largest hash chains: ")); ok1(strstr(summary, "Number of uncoalesced records: 0\n")); ok1(strstr(summary, "Smallest/average/largest uncoalesced runs: 0/0/0\n")); ok1(strstr(summary, "Percentage keys/data/padding/free/dead/rechdrs&tailers/hashes: ")); free(summary); tdb_close(tdb); } return exit_status(); } tdb-1.4.2/test/run-transaction-expand.c0000660000000000000000000000605112406075657017770 0ustar rootroot00000000000000#include "../common/tdb_private.h" /* Speed up the tests, but do the actual sync tests. 
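run-summary.c above only greps the text tdb_summary() returns; for completeness, a sketch of printing it directly. The caller owns and must free the string, as the test does:

#include <tdb.h>
#include <stdio.h>
#include <stdlib.h>

// Sketch: print the statistics block that run-summary.c checks for.
static void print_summary(struct tdb_context *tdb)
{
	char *summary = tdb_summary(tdb);

	if (summary != NULL) {
		puts(summary);
		free(summary);
	}
}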
*/ static unsigned int sync_counts = 0; static inline int fake_fsync(int fd) { sync_counts++; return 0; } #define fsync fake_fsync #ifdef MS_SYNC static inline int fake_msync(void *addr, size_t length, int flags) { sync_counts++; return 0; } #define msync fake_msync #endif #ifdef HAVE_FDATASYNC static inline int fake_fdatasync(int fd) { sync_counts++; return 0; } #define fdatasync fake_fdatasync #endif #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include "logging.h" static void write_record(struct tdb_context *tdb, size_t extra_len, TDB_DATA *data) { TDB_DATA key; key.dsize = strlen("hi"); key.dptr = discard_const_p(uint8_t, "hi"); data->dsize += extra_len; tdb_transaction_start(tdb); tdb_store(tdb, key, *data, TDB_REPLACE); tdb_transaction_commit(tdb); } int main(int argc, char *argv[]) { struct tdb_context *tdb; size_t i; TDB_DATA data; struct tdb_record rec; tdb_off_t off; /* Do *not* suppress sync for this test; we do it ourselves. */ unsetenv("TDB_NO_FSYNC"); plan_tests(5); tdb = tdb_open_ex("run-transaction-expand.tdb", 1024, TDB_CLEAR_IF_FIRST, O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL); ok1(tdb); data.dsize = 0; data.dptr = calloc(1000, getpagesize()); if (data.dptr == NULL) { diag("Unable to allocate memory for data.dptr"); tdb_close(tdb); exit(1); } /* Simulate a slowly growing record. */ for (i = 0; i < 1000; i++) write_record(tdb, getpagesize(), &data); tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, &off); tdb_read(tdb, off, &rec, sizeof(rec), DOCONV()); diag("TDB size = %zu, recovery = %llu-%llu", (size_t)tdb->map_size, (unsigned long long)off, (unsigned long long)(off + sizeof(rec) + rec.rec_len)); /* We should only be about 5 times larger than largest record. */ ok1(tdb->map_size < 6 * i * getpagesize()); tdb_close(tdb); tdb = tdb_open_ex("run-transaction-expand.tdb", 1024, TDB_CLEAR_IF_FIRST, O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL); ok1(tdb); data.dsize = 0; /* Simulate a slowly growing record, repacking to keep * recovery area at end. */ for (i = 0; i < 1000; i++) { write_record(tdb, getpagesize(), &data); if (i % 10 == 0) tdb_repack(tdb); } tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, &off); tdb_read(tdb, off, &rec, sizeof(rec), DOCONV()); diag("TDB size = %zu, recovery = %llu-%llu", (size_t)tdb->map_size, (unsigned long long)off, (unsigned long long)(off + sizeof(rec) + rec.rec_len)); /* We should only be about 4 times larger than largest record. */ ok1(tdb->map_size < 5 * i * getpagesize()); /* We should have synchronized multiple times. 
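The second loop above bounds file growth by calling tdb_repack() every tenth store, which keeps the transaction recovery area at the end of the file. A sketch of that idiom; the interval of 10 is simply what the test uses, not a recommendation:

#include <tdb.h>

// Sketch of the growth-bounding idiom: repack every Nth store.
static int store_with_repack(struct tdb_context *tdb, TDB_DATA key,
                             TDB_DATA value, unsigned int *counter)
{
	if (tdb_store(tdb, key, value, TDB_REPLACE) != 0)
		return -1;
	if (++(*counter) % 10 == 0)
		return tdb_repack(tdb);
	return 0;
}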
*/ ok1(sync_counts); tdb_close(tdb); free(data.dptr); return exit_status(); } tdb-1.4.2/test/run-traverse-chain.c0000660000000000000000000000421313444661620017071 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include "logging.h" static char keystr0[] = "x"; static TDB_DATA key0 = { .dptr = (uint8_t *)keystr0, .dsize = sizeof(keystr0) }; static char valuestr0[] = "y"; static TDB_DATA value0 = { .dptr = (uint8_t *)valuestr0, .dsize = sizeof(valuestr0) }; static char keystr1[] = "aaa"; static TDB_DATA key1 = { .dptr = (uint8_t *)keystr1, .dsize = sizeof(keystr1) }; static char valuestr1[] = "bbbbb"; static TDB_DATA value1 = { .dptr = (uint8_t *)valuestr1, .dsize = sizeof(valuestr1) }; static TDB_DATA *keys[] = { &key0, &key1 }; static TDB_DATA *values[] = { &value0, &value1 }; static bool tdb_data_same(TDB_DATA d1, TDB_DATA d2) { if (d1.dsize != d2.dsize) { return false; } return (memcmp(d1.dptr, d2.dptr, d1.dsize) == 0); } struct traverse_chain_state { size_t idx; bool ok; }; static int traverse_chain_fn(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data, void *private_data) { struct traverse_chain_state *state = private_data; state->ok &= tdb_data_same(key, *keys[state->idx]); state->ok &= tdb_data_same(data, *values[state->idx]); state->idx += 1; return 0; } int main(int argc, char *argv[]) { struct tdb_context *tdb; struct traverse_chain_state state = { .ok = true }; int ret; plan_tests(4); tdb = tdb_open_ex( "traverse_chain.tdb", 1, TDB_CLEAR_IF_FIRST, O_RDWR|O_CREAT, 0600, &taplogctx, NULL); ok1(tdb); /* add in reverse order, tdb_store adds to the front of the list */ ret = tdb_store(tdb, key1, value1, TDB_INSERT); ok1(ret == 0); ret = tdb_store(tdb, key0, value0, TDB_INSERT); ok1(ret == 0); ret = tdb_traverse_key_chain(tdb, key0, traverse_chain_fn, &state); ok1(ret == 2); ok1(state.ok); unlink(tdb_name(tdb)); tdb_close(tdb); return exit_status(); } tdb-1.4.2/test/run-traverse-in-transaction.c0000660000000000000000000000456213126252766020753 0ustar rootroot00000000000000#include "lock-tracking.h" #include "../common/tdb_private.h" #define fcntl fcntl_with_lockcheck #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #undef fcntl_with_lockcheck #include #include #include "external-agent.h" #include "logging.h" static struct agent *agent; static bool correct_key(TDB_DATA key) { return key.dsize == strlen("hi") && memcmp(key.dptr, "hi", key.dsize) == 0; } static bool correct_data(TDB_DATA data) { return data.dsize == strlen("world") && memcmp(data.dptr, "world", data.dsize) == 0; } static int traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data, void *p) { ok1(correct_key(key)); ok1(correct_data(data)); return 0; } int main(int argc, char *argv[]) { struct tdb_context *tdb; TDB_DATA key, data; plan_tests(13); agent = prepare_external_agent(); tdb = tdb_open_ex("run-traverse-in-transaction.tdb", 1024, 
TDB_CLEAR_IF_FIRST, O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL); ok1(tdb); key.dsize = strlen("hi"); key.dptr = discard_const_p(uint8_t, "hi"); data.dptr = discard_const_p(uint8_t, "world"); data.dsize = strlen("world"); ok1(tdb_store(tdb, key, data, TDB_INSERT) == 0); ok1(external_agent_operation(agent, OPEN, tdb_name(tdb)) == SUCCESS); ok1(tdb_transaction_active(tdb) == 0); ok1(tdb_transaction_start(tdb) == 0); ok1(tdb_transaction_active(tdb) == 1); ok1(external_agent_operation(agent, TRANSACTION_START, tdb_name(tdb)) == WOULD_HAVE_BLOCKED); tdb_traverse(tdb, traverse, NULL); /* That should *not* release the transaction lock! */ ok1(external_agent_operation(agent, TRANSACTION_START, tdb_name(tdb)) == WOULD_HAVE_BLOCKED); tdb_traverse_read(tdb, traverse, NULL); /* That should *not* release the transaction lock! */ ok1(external_agent_operation(agent, TRANSACTION_START, tdb_name(tdb)) == WOULD_HAVE_BLOCKED); ok1(tdb_transaction_commit(tdb) == 0); ok1(tdb_transaction_active(tdb) == 0); /* Now we should be fine. */ ok1(external_agent_operation(agent, TRANSACTION_START, tdb_name(tdb)) == SUCCESS); tdb_close(tdb); return exit_status(); } tdb-1.4.2/test/run-wronghash-fail.c0000660000000000000000000000577212406075657017110 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include static void log_fn(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...) { unsigned int *count = tdb_get_logging_private(tdb); if (strstr(fmt, "hash")) (*count)++; } int main(int argc, char *argv[]) { struct tdb_context *tdb; unsigned int log_count; TDB_DATA d; struct tdb_logging_context log_ctx = { log_fn, &log_count }; plan_tests(28); /* Create with default hash. */ log_count = 0; tdb = tdb_open_ex("run-wronghash-fail.tdb", 0, 0, O_CREAT|O_RDWR|O_TRUNC, 0600, &log_ctx, NULL); ok1(tdb); ok1(log_count == 0); d.dptr = discard_const_p(uint8_t, "Hello"); d.dsize = 5; ok1(tdb_store(tdb, d, d, TDB_INSERT) == 0); tdb_close(tdb); /* Fail to open with different hash. */ tdb = tdb_open_ex("run-wronghash-fail.tdb", 0, 0, O_RDWR, 0, &log_ctx, tdb_jenkins_hash); ok1(!tdb); ok1(log_count == 1); /* Create with different hash. */ log_count = 0; tdb = tdb_open_ex("run-wronghash-fail.tdb", 0, 0, O_CREAT|O_RDWR|O_TRUNC, 0600, &log_ctx, tdb_jenkins_hash); ok1(tdb); ok1(log_count == 0); tdb_close(tdb); /* Endian should be no problem. */ log_count = 0; tdb = tdb_open_ex("test/jenkins-le-hash.tdb", 0, 0, O_RDWR, 0, &log_ctx, tdb_old_hash); ok1(!tdb); ok1(log_count == 1); log_count = 0; tdb = tdb_open_ex("test/jenkins-be-hash.tdb", 0, 0, O_RDWR, 0, &log_ctx, tdb_old_hash); ok1(!tdb); ok1(log_count == 1); log_count = 0; /* Fail to open with old default hash. 
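run-traverse-in-transaction.c above brackets its traversals with tdb_transaction_active() checks. The same call works as a guard in application code; a sketch, assuming the public tdb_transaction_active() declaration and a hypothetical helper name:

#include <tdb.h>

// Sketch: never nest a transaction by accident.
static int ensure_transaction(struct tdb_context *tdb)
{
	if (tdb_transaction_active(tdb))
		return 0;                 // already inside one, nothing to do
	return tdb_transaction_start(tdb);
}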
*/ tdb = tdb_open_ex("run-wronghash-fail.tdb", 0, 0, O_RDWR, 0, &log_ctx, tdb_old_hash); ok1(!tdb); ok1(log_count == 1); log_count = 0; tdb = tdb_open_ex("test/jenkins-le-hash.tdb", 0, 0, O_RDONLY, 0, &log_ctx, tdb_jenkins_hash); ok1(tdb); ok1(log_count == 0); ok1(tdb_check(tdb, NULL, NULL) == 0); tdb_close(tdb); log_count = 0; tdb = tdb_open_ex("test/jenkins-be-hash.tdb", 0, 0, O_RDONLY, 0, &log_ctx, tdb_jenkins_hash); ok1(tdb); ok1(log_count == 0); ok1(tdb_check(tdb, NULL, NULL) == 0); tdb_close(tdb); /* It should open with jenkins hash if we don't specify. */ log_count = 0; tdb = tdb_open_ex("test/jenkins-le-hash.tdb", 0, 0, O_RDWR, 0, &log_ctx, NULL); ok1(tdb); ok1(log_count == 0); ok1(tdb_check(tdb, NULL, NULL) == 0); tdb_close(tdb); log_count = 0; tdb = tdb_open_ex("test/jenkins-be-hash.tdb", 0, 0, O_RDWR, 0, &log_ctx, NULL); ok1(tdb); ok1(log_count == 0); ok1(tdb_check(tdb, NULL, NULL) == 0); tdb_close(tdb); log_count = 0; tdb = tdb_open_ex("run-wronghash-fail.tdb", 0, 0, O_RDONLY, 0, &log_ctx, NULL); ok1(tdb); ok1(log_count == 0); ok1(tdb_check(tdb, NULL, NULL) == 0); tdb_close(tdb); return exit_status(); } tdb-1.4.2/test/run-zero-append.c0000660000000000000000000000201612406075657016407 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include "logging.h" int main(int argc, char *argv[]) { struct tdb_context *tdb; TDB_DATA key, data; plan_tests(4); tdb = tdb_open_ex(NULL, 1024, TDB_INTERNAL, O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL); ok1(tdb); /* Tickle bug on appending zero length buffer to zero length buffer. 
*/ key.dsize = strlen("hi"); key.dptr = discard_const_p(uint8_t, "hi"); data.dptr = discard_const_p(uint8_t, "world"); data.dsize = 0; ok1(tdb_append(tdb, key, data) == 0); ok1(tdb_append(tdb, key, data) == 0); data = tdb_fetch(tdb, key); ok1(data.dsize == 0); tdb_close(tdb); free(data.dptr); return exit_status(); } tdb-1.4.2/test/run.c0000660000000000000000000000246412406075657014174 0ustar rootroot00000000000000#include "../common/tdb_private.h" #include "../common/io.c" #include "../common/tdb.c" #include "../common/lock.c" #include "../common/freelist.c" #include "../common/traverse.c" #include "../common/transaction.c" #include "../common/error.c" #include "../common/open.c" #include "../common/check.c" #include "../common/hash.c" #include "../common/mutex.c" #include "tap-interface.h" #include #include "logging.h" int main(int argc, char *argv[]) { struct tdb_context *tdb; TDB_DATA key, data; plan_tests(10); tdb = tdb_open_ex("run.tdb", 1024, TDB_CLEAR_IF_FIRST, O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL); ok1(tdb); key.dsize = strlen("hi"); key.dptr = discard_const_p(uint8_t, "hi"); data.dsize = strlen("world"); data.dptr = discard_const_p(uint8_t, "world"); ok1(tdb_store(tdb, key, data, TDB_MODIFY) < 0); ok1(tdb_error(tdb) == TDB_ERR_NOEXIST); ok1(tdb_store(tdb, key, data, TDB_INSERT) == 0); ok1(tdb_store(tdb, key, data, TDB_INSERT) < 0); ok1(tdb_error(tdb) == TDB_ERR_EXISTS); ok1(tdb_store(tdb, key, data, TDB_MODIFY) == 0); data = tdb_fetch(tdb, key); ok1(data.dsize == strlen("world")); ok1(memcmp(data.dptr, "world", strlen("world")) == 0); free(data.dptr); key.dsize++; data = tdb_fetch(tdb, key); ok1(data.dptr == NULL); tdb_close(tdb); return exit_status(); } tdb-1.4.2/test/rwlock-be.tdb0000660000000000000000000000127012406075657015576 0ustar rootroot00000000000000[binary TDB fixture: contents omitted] tdb-1.4.2/test/rwlock-le.tdb0000660000000000000000000000127012406075657015610 0ustar rootroot00000000000000[binary TDB fixture: contents omitted] tdb-1.4.2/test/sample_tdb.tdb0000660000000000000000000002000013444661620016015 0ustar rootroot00000000000000[binary TDB fixture: contents omitted] tdb-1.4.2/test/tap-interface.h0000660000000000000000000000334612406075657016113 0ustar rootroot00000000000000/* Unix SMB/CIFS implementation. Simplistic implementation of tap interface. Copyright (C) Rusty Russell 2012 ** NOTE! The following LGPL license applies to the talloc ** library.
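run.c above pins down the three store modes: TDB_MODIFY needs an existing record, TDB_INSERT needs a missing one, TDB_REPLACE accepts either. A sketch of turning the resulting error codes into messages (helper name illustrative):

#include <tdb.h>

// Sketch: map the error codes run.c asserts on back to explanations.
static const char *classify_store_error(struct tdb_context *tdb)
{
	switch (tdb_error(tdb)) {
	case TDB_ERR_NOEXIST:
		return "TDB_MODIFY on a missing key";
	case TDB_ERR_EXISTS:
		return "TDB_INSERT on an existing key";
	default:
		return tdb_errorstr(tdb);
	}
}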
This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include #ifndef __location__ #define __TAP_STRING_LINE1__(s) #s #define __TAP_STRING_LINE2__(s) __TAP_STRING_LINE1__(s) #define __TAP_STRING_LINE3__ __TAP_STRING_LINE2__(__LINE__) #define __location__ __FILE__ ":" __TAP_STRING_LINE3__ #endif #define plan_tests(num) #define fail(...) do { \ fprintf(stderr, __VA_ARGS__); \ fprintf(stderr, "\n"); \ fflush(stderr); \ exit(1); \ } while(0) #define diag(...) do { \ fprintf(stdout, __VA_ARGS__); \ fprintf(stdout, "\n"); \ fflush(stdout); \ } while(0) #define pass(...) do { \ fprintf(stdout, "."); \ fflush(stdout); \ } while(0) #define ok(e, ...) do { \ if (e) { \ pass(); \ } else { \ fail(__VA_ARGS__); \ } \ } while(0) #define ok1(e) ok((e), "%s:%s", __location__, #e) #define skip(n, ...) diag(__VA_ARGS__) #define exit_status() 0 tdb-1.4.2/test/tap-to-subunit.h0000660000000000000000000001226612406075657016271 0ustar rootroot00000000000000#ifndef TAP_TO_SUBUNIT_H #define TAP_TO_SUBUNIT_H /* * tap-style wrapper for subunit. * * Copyright (c) 2011 Rusty Russell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "replace.h" /** * plan_tests - announce the number of tests you plan to run * @tests: the number of tests * * This should be the first call in your test program: it allows tracing * of failures which mean that not all tests are run. * * If you don't know how many tests will actually be run, assume all of them * and use skip() if you don't actually run some tests. * * Example: * plan_tests(13); */ void plan_tests(unsigned int tests); /** * ok1 - Simple conditional test * @e: the expression which we expect to be true. 
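Putting the macros above together, a complete test in this suite's style is tiny. A sketch, assuming tap-interface.h is on the include path and that its elided #include lines pull in the libc headers its macros need:

#include "tap-interface.h"
#include <string.h>

// Sketch: minimal TAP-style test using plan_tests/ok1/ok/exit_status.
int main(void)
{
	plan_tests(2);
	ok1(strlen("hi") == 2);
	ok(1 + 1 == 2, "arithmetic should work");
	return exit_status();
}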
* * This is the simplest kind of test: if the expression is true, the * test passes. The name of the test which is printed will simply be * file name, line number, and the expression itself. * * Example: * ok1(somefunc() == 1); */ # define ok1(e) ((e) ? \ _gen_result(1, __func__, __FILE__, __LINE__, "%s", #e) : \ _gen_result(0, __func__, __FILE__, __LINE__, "%s", #e)) /** * ok - Conditional test with a name * @e: the expression which we expect to be true. * @...: the printf-style name of the test. * * If the expression is true, the test passes. The name of the test will be * the filename, line number, and the printf-style string. This can be clearer * than simply the expression itself. * * Example: * ok1(somefunc() == 1); * ok(somefunc() == 0, "Second somefunc() should fail"); */ # define ok(e, ...) ((e) ? \ _gen_result(1, __func__, __FILE__, __LINE__, \ __VA_ARGS__) : \ _gen_result(0, __func__, __FILE__, __LINE__, \ __VA_ARGS__)) /** * pass - Note that a test passed * @...: the printf-style name of the test. * * For complicated code paths, it can be easiest to simply call pass() in one * branch and fail() in another. * * Example: * int x = somefunc(); * if (x > 0) * pass("somefunc() returned a valid value"); * else * fail("somefunc() returned an invalid value"); */ # define pass(...) ok(1, __VA_ARGS__) /** * fail - Note that a test failed * @...: the printf-style name of the test. * * For complicated code paths, it can be easiest to simply call pass() in one * branch and fail() in another. */ # define fail(...) ok(0, __VA_ARGS__) unsigned int _gen_result(int, const char *, const char *, unsigned int, const char *, ...) PRINTF_ATTRIBUTE(5, 6); /** * diag - print a diagnostic message (use instead of printf/fprintf) * @fmt: the format of the printf-style message * * diag ensures that the output will not be considered to be a test * result by the TAP test harness. It will append '\n' for you. * * Example: * diag("Now running complex tests"); */ void diag(const char *fmt, ...) PRINTF_ATTRIBUTE(1, 2); /** * skip - print a diagnostic message (use instead of printf/fprintf) * @n: number of tests you're skipping. * @fmt: the format of the reason you're skipping the tests. * * Sometimes tests cannot be run because the test system lacks some feature: * you should explicitly document that you're skipping tests using skip(). * * From the Test::More documentation: * If it's something the user might not be able to do, use SKIP. This * includes optional modules that aren't installed, running under an OS that * doesn't have some feature (like fork() or symlinks), or maybe you need an * Internet connection and one isn't available. * * Example: * #ifdef HAVE_SOME_FEATURE * ok1(somefunc()); * #else * skip(1, "Don't have SOME_FEATURE"); * #endif */ void skip(unsigned int n, const char *fmt, ...) PRINTF_ATTRIBUTE(2, 3); /** * exit_status - the value that main should return. * * For maximum compatibility your test program should return a particular exit * code (ie. 0 if all tests were run, and every test which was expected to * succeed succeeded). 
/**
 * exit_status - the value that main should return.
 *
 * For maximum compatibility your test program should return a particular exit
 * code (ie. 0 if all tests were run, and every test which was expected to
 * succeed succeeded).
 *
 * Example:
 *	exit(exit_status());
 */
int exit_status(void);
#endif /* CCAN_TAP_H */
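Again for illustration only (not part of the tarball): a minimal sketch of a
complete test program written against the interface declared above, using the
skip() idiom from its own documentation. HAVE_SOME_FEATURE and feature_works()
are hypothetical.

#include "tap-to-subunit.h"

int main(void)
{
	plan_tests(2);			/* announce both tests up front */
	ok1(1 + 1 == 2);		/* test named after the expression */
#ifdef HAVE_SOME_FEATURE
	ok(feature_works(), "feature should work");
#else
	skip(1, "Don't have SOME_FEATURE");	/* still accounts for the test */
#endif
	diag("basic arithmetic checked");	/* not a test result */
	return exit_status();	/* 0 if all planned tests ran and passed */
}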
tdb-1.4.2/test/tdb.corrupt0000660000000000000000000057000012406075657015411 0ustar rootroot00000000000000[binary content omitted: tdb.corrupt is a deliberately corrupted TDB database (magic string "TDB file") shipped as a test fixture. The legible fragments are winbindd cache records for an ad.viacom.com Active Directory forest: IDMAP/UID2SID, IDMAP/SID2UID, IDMAP/GID2SID and IDMAP/SID2GID identity mappings, NBT/*#20 and NBT/*#1C name-cache entries, SAF/DOMAIN/* server-affinity entries, and AD_SITENAME/DOMAIN/* site records, each stored with data of the form "<epoch-timestamp>/<value>".]
1250715849/S-1-5-21-1834383793-1770918451-929701000-141242pÞX>C¿2è™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-141242 1250715849/10000153p˜šX=Í% §™&IDMAP/UID2SID/10000152 1250787006/S-1-5-21-1834383793-1770918451-929701000-143835pèX>Ëóù™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-143835 1250787006/10000152p¸VX=íAb™&IDMAP/GID2SID/10000337 1251157458/S-1-5-21-1834383793-1770918451-929701000-129692pÆX> Š$ß™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-129692 1251157458/10000337pLáX= ãìg™&IDMAP/UID2SID/10000116 1250270826/S-1-5-21-1834383793-1770918451-929701000-143752pÈ6X>˜‰™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-143752 1250270826/10000116ph˜X= PÛ™&IDMAP/UID2SID/10000115 1250696220/S-1-5-21-1834383793-1770918451-929701000-143785phÂX>ŸÅÁa™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-143785 1250696220/10000115pŽX=šj»™&IDMAP/UID2SID/10000099 1251824050/S-1-5-21-1834383793-1770918451-929701000-148092p !X>kWdç™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-148092 1251824050/10000099ppéX=m\ð¹™&IDMAP/UID2SID/10000138 1250642977/S-1-5-21-1834383793-1770918451-929701000-143880pH„X>KYp™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-143880 1250642977/10000138p¸¼X=m†S-™&IDMAP/UID2SID/10000137 1251143370/S-1-5-21-1834383793-1770918451-929701000-119216pXŽX>ÃÞR™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-119216 1251143370/10000137p¨ñX=m}‡™&IDMAP/UID2SID/10000134 1250703805/S-1-5-21-1834383793-1770918451-929701000-119291pˆÈX>ÃÏr™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-119291 1250703805/10000134p X=m.àú™&IDMAP/UID2SID/10000133 1250870800/S-1-5-21-1834383793-1770918451-929701000-119282p°ÙX>C•Žô™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-119282 1250870800/10000133p(~X=mXCn™&IDMAP/UID2SID/10000132 1251133406/S-1-5-21-1834383793-1770918451-929701000-107781p€˜X>ŸûÉ™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-107781 1251133406/10000132pСX=m‚¦á™&IDMAP/UID2SID/10000131 1251133406/S-1-5-21-1834383793-1770918451-929701000-129711p€ŸX>~Ë™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-129711 1251133406/10000131p0uX<]&ë¾™&IDMAP/UID2SID/10000105 1250880279/S-1-5-21-1834383793-1770918451-929701000-95165Bp˜ÄX=N¯÷™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-95165 1250880279/10000105Bp˜iX=½k™&IDMAP/UID2SID/10000124 1250698821/S-1-5-21-1834383793-1770918451-929701000-119208p˜X>C ½8™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-119208 1250698821/10000124pèjX=½G{Þ™&IDMAP/UID2SID/10000123 1250698821/S-1-5-21-1834383793-1770918451-929701000-143882p.X>Kï&:™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-143882 1250698821/10000123p¬ÞX< &™&IDMAP/UID2SID/10000118 1250790522/S-1-5-21-1834383793-1770918451-929701000-95343BpPtX=öÐν™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-95343 1250790522/10000118BphnX= ¹‰ô™&IDMAP/UID2SID/10000117 1250874996/S-1-5-21-1834383793-1770918451-929701000-148463pæX>›M:¿™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-148463 1250874996/10000117p¨X= ‹y5™&IDMAP/UID2SID/10000112 1250713299/S-1-5-21-1834383793-1770918451-929701000-143892p¨7X>Ë$N™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-143892 1250713299/10000112pˆX= µÜ¨™&IDMAP/UID2SID/10000111 1248831893/S-1-5-21-1834383793-1770918451-929701000-107738p =X>Ê…™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-107738 1248831893/10000111p˜•X<í9¤Õ™&IDMAP/GID2SID/10000336 
1250868523/S-1-5-21-1834383793-1770918451-929701000-38184BpüÑX=¾µÆ{™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-38184 1250868523/10000336Bp(ŒX<”™&IDMAP/UID2SID/10000096 1251825509/S-1-5-21-1834383793-1770918451-929701000-79416Bp€­X=rô™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-79416 1251825509/10000096Bph‘X= ß?™&IDMAP/UID2SID/10000110 1250868523/S-1-5-21-1834383793-1770918451-929701000-129710pø‰X>ß—f™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-129710 1250868523/10000110p(wX=]~^ñ™&IDMAP/UID2SID/10000109 1250785574/S-1-5-21-1834383793-1770918451-929701000-119192p(üX>‘œÐ™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-119192 1250785574/10000109pxX=]¨Ád™&IDMAP/UID2SID/10000108 1250785574/S-1-5-21-1834383793-1770918451-929701000-119198p`µX>±.™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-119198 1250785574/10000108p¸§X=]ÎwŒ™&IDMAP/UID2SID/10000101 1251131675/S-1-5-21-1834383793-1770918451-929701000-141243pèÚX>CoM™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-141243 1251131675/10000101pð¼X=]øÚÿ™&IDMAP/UID2SID/10000100 1251131674/S-1-5-21-1834383793-1770918451-929701000-143819pȪX>ËHcÆ™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-143819 1251131674/10000100p [X=î0¢™&IDMAP/UID2SID/10000097 1250536200/S-1-5-21-1834383793-1770918451-929701000-148128pˆùX>—:Ó™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-148128 1250536200/10000097p€oX=çÉ™&IDMAP/UID2SID/10000090 1249923102/S-1-5-21-1834383793-1770918451-929701000-119181p@îX>—«Žˆ™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-119181 1249923102/10000090pH’X=ݳŸ™&IDMAP/UID2SID/10000089 1250611094/S-1-5-21-1834383793-1770918451-929701000-148421p1X>›Ðh™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-148421 1250611094/10000089pô¿X=]·oâ™&IDMAP/GID2SID/10000280 1250538396/S-1-5-21-1834383793-1770918451-929701000-101680p°qX>lô–™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-128970 1251822591/10000281pX=­VŽ·™&IDMAP/GID2SID/10000279 1250538396/S-1-5-21-1834383793-1770918451-929701000-101669pèX=] o™&IDMAP/GID2SID/10000281 1251822591/S-1-5-21-1834383793-1770918451-929701000-128970pè›X<­€ñ*™&IDMAP/GID2SID/10000278 1251246560/S-1-5-21-1834383793-1770918451-929701000-25224BpÈâX=¶Û™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-25224 1251246560/10000278Bp`X<ÝÝh™&IDMAP/UID2SID/10000088 1251824050/S-1-5-21-1834383793-1770918451-929701000-66635Bp@çX=Jú4œ™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-66635 1251824050/10000088BpøºX<­Ô·™&IDMAP/GID2SID/10000276 1246951403/S-1-5-21-1834383793-1770918451-929701000-39695Bpˆ{X=ê$R·™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-39695 1246951403/10000276BpØ»X<­þ…™&IDMAP/GID2SID/10000275 1246951403/S-1-5-21-1834383793-1770918451-929701000-39694BptÃX=jï*Ô™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-39694 1246951403/10000275Bp”X<ÝÌ…™&IDMAP/UID2SID/10000087 1246951403/S-1-5-21-1834383793-1770918451-929701000-87059Bp8«X=¢ó™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-87059 1246951403/10000087Bp˜¶X=Íû)þ™&IDMAP/UID2SID/10000053 1250286053/S-1-5-21-1834383793-1770918451-929701000-119356pxöX>oîÌå™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-119356 1250286053/10000053pø¬(îò“º™&IDMAP/UID2SID/10000 1246297471/-B@X—(îò?ô™&IDMAP/UID2SID/10001 1246297471/-B@À¬X=-!gi™&IDMAP/UID2SID/10000077 1251824050/S-1-5-21-1834383793-1770918451-929701000-107858p(4X>Ëníé™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-107858 1251824050/10000077p `X=-KÊÜ™&IDMAP/UID2SID/10000076 
1251143169/S-1-5-21-1834383793-1770918451-929701000-129686pXþX>ó;ùò™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-129686 1251143169/10000076pÐ]X<Ý1/ù™&IDMAP/UID2SID/10000086 1251824050/S-1-5-21-1834383793-1770918451-929701000-95888Bp˜&X=&Ã×L™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-95888 1251824050/10000086BpÜ#X=Ý[’l™&IDMAP/UID2SID/10000085 1251310325/S-1-5-21-1834383793-1770918451-929701000-107683p»X>ó+¯Œ™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-107683 1251310325/10000085pÜ)X=Ý…õß™&IDMAP/UID2SID/10000084 1250642961/S-1-5-21-1834383793-1770918451-929701000-107767pàéX>Ÿºa™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-107767 1250642961/10000084p§X=ý•ß™&IDMAP/GID2SID/10000262 1251824032/S-1-5-21-1834383793-1770918451-929701000-113648pà•X>‹ž®™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-113648 1251824032/10000262p4ÅX=ý¿B6™&IDMAP/GID2SID/10000261 1250642960/S-1-5-21-1834383793-1770918451-929701000-148342p0=X>‡ÑŠ)™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-148342 1250642960/10000261p˜“X<ݯXS™&IDMAP/UID2SID/10000083 1251824050/S-1-5-21-1834383793-1770918451-929701000-66812BpШX=ý饩™&IDMAP/GID2SID/10000260 1250802575/S-1-5-21-1834383793-1770918451-929701000-119218pЮX>[ƒ¸™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-119218 1250802575/10000260pà&X<Ý-‚­™&IDMAP/UID2SID/10000080 1251824050/S-1-5-21-1834383793-1770918451-929701000-79421BpPX=žBs‹™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-79421 1251824050/10000080Bpx©X=-Í ‚™&IDMAP/UID2SID/10000079 1250267446/S-1-5-21-1834383793-1770918451-929701000-119274pp&X>ÿ4Û™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-119274 1250267446/10000079p¸”X=M1QL™&IDMAP/GID2SID/10000255 1250632584/S-1-5-21-1834383793-1770918451-929701000-129739pÐæX>·R¡U™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-129739 1250632584/10000255pü=X=M[´¿™&IDMAP/GID2SID/10000254 1251822639/S-1-5-21-1834383793-1770918451-929701000-119284p@¶X>ÛÍúZ™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-119284 1251822639/10000254pëX<-Éó6™&IDMAP/UID2SID/10000073 1251130186/S-1-5-21-1834383793-1770918451-929701000-55147Bph´X=öFÞå™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-55147 1251130186/10000073Bp°¾X=-÷ö™&IDMAP/UID2SID/10000078 1250871873/S-1-5-21-1834383793-1770918451-929701000-143901pl7X>÷ÌÙ&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-143901 1250871873/10000078p'X<-u-P™&IDMAP/UID2SID/10000075 1250893808/S-1-5-21-1834383793-1770918451-929701000-66597Bp0(X=R¿º¿™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-66597 1250893808/10000075BpØ­X=-ŸÃ™&IDMAP/UID2SID/10000074 1251310813/S-1-5-21-1834383793-1770918451-929701000-148455pH®X>xख़&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-148455 1251310813/10000074p`€X=-óVª™&IDMAP/UID2SID/10000072 1247681305/S-1-5-21-1834383793-1770918451-929701000-148484pØX>›hoê™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-148484 1247681305/10000072pXðX=-º™&IDMAP/UID2SID/10000071 1250274070/S-1-5-21-1834383793-1770918451-929701000-148482p¨lX>›¢ ™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-148482 1250274070/10000071p@°X=-G‘™&IDMAP/UID2SID/10000070 1251822623/S-1-5-21-1834383793-1770918451-929701000-148479p8øX>£Éÿ™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-148479 1251822623/10000070p7m)Ú™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-143728 1251132054/10000248pÜ~X=ö%I™&IDMAP/GID2SID/10000247 
1250805470/S-1-5-21-1834383793-1770918451-929701000-136387pjX>‡~Å™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-136387 1250805470/10000247püDX= ‰¼™&IDMAP/GID2SID/10000246 1250805470/S-1-5-21-1834383793-1770918451-929701000-136388p˜¡X>‡Çd*™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-136388 1250805470/10000246p°¢X=Jì/™&IDMAP/GID2SID/10000245 1250793789/S-1-5-21-1834383793-1770918451-929701000-143907pHoX>Å&½™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-143907 1250793789/10000245p£X=tO£™&IDMAP/GID2SID/10000244 1250971642/S-1-5-21-1834383793-1770918451-929701000-129618p`§X> þR#™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-129618 1250971642/10000244pp¤X=ž²™&IDMAP/GID2SID/10000243 1250805470/S-1-5-21-1834383793-1770918451-929701000-129662p­X>‹é®5™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-129662 1250805470/10000243pP¥X=ÈŠ™&IDMAP/GID2SID/10000242 1250805470/S-1-5-21-1834383793-1770918451-929701000-107844p´X>ãQÊ™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-107844 1250805470/10000242pÈŽX<òxý™&IDMAP/GID2SID/10000241 1250805470/S-1-5-21-1834383793-1770918451-929701000-42608BphX=^k.™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-42608 1250805470/10000241Bpø¥X=Üp™&IDMAP/GID2SID/10000240 1251822639/S-1-5-21-1834383793-1770918451-929701000-107746pÈþX>7x~Ò™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-107746 1251822639/10000240pð§X=í»úE™&IDMAP/GID2SID/10000239 1250805470/S-1-5-21-1834383793-1770918451-929701000-143774pàÛX>·¸R¶™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-143774 1250805470/10000239pÜ7X=íå]¹™&IDMAP/GID2SID/10000238 1250805470/S-1-5-21-1834383793-1770918451-929701000-129747pØŠX>7(ûn™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-129747 1250805470/10000238pdÀX=íÁ,™&IDMAP/GID2SID/10000237 1250906407/S-1-5-21-1834383793-1770918451-929701000-129198pÀÀX>¯‰™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-129198 1250906407/10000237pªX=í9$ ™&IDMAP/GID2SID/10000236 1250971642/S-1-5-21-1834383793-1770918451-929701000-143687ph9X>‹ÄÁ™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-143687 1250971642/10000236pp«X<íc‡™&IDMAP/GID2SID/10000235 1251135589/S-1-5-21-1834383793-1770918451-929701000-42572Bp@šX=iÐ?™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-42572 1251135589/10000235BpP¬X=íꆙ&IDMAP/GID2SID/10000234 1251157458/S-1-5-21-1834383793-1770918451-929701000-148353pøX>·˜q™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-148353 1251157458/10000234p<5X=í·Mú™&IDMAP/GID2SID/10000233 1250805470/S-1-5-21-1834383793-1770918451-929701000-129661p ÓX>‹9ÈЙ&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-129661 1250805470/10000233p®X=íá°m™&IDMAP/GID2SID/10000232 1251822639/S-1-5-21-1834383793-1770918451-929701000-107691pH”X> Ú§B™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-107691 1251822639/10000232p`bX=í á™&IDMAP/GID2SID/10000231 1250805470/S-1-5-21-1834383793-1770918451-929701000-107778pèX>·xÁE™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-107778 1250805470/10000231p¤X=í5wT™&IDMAP/GID2SID/10000230 1250805470/S-1-5-21-1834383793-1770918451-929701000-108018pè X>¤…Ž™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-108018 1250805470/10000230p¤ÏX==Õ•)™&IDMAP/GID2SID/10000229 1250805470/S-1-5-21-1834383793-1770918451-929701000-129663p´X>‹™•š™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-129663 1250805470/10000229p±X<=ÿøœ™&IDMAP/GID2SID/10000228 
1251135589/S-1-5-21-1834383793-1770918451-929701000-42573BpˆX=’ž÷"™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-42573 1251135589/10000228Bpp²X<=)\™&IDMAP/GID2SID/10000227 1250630963/S-1-5-21-1834383793-1770918451-929701000-42595BpH:X=ê|x÷™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-42595 1250630963/10000227BpP³X==S¿ƒ™&IDMAP/GID2SID/10000226 1250805470/S-1-5-21-1834383793-1770918451-929701000-129721phX>7DK™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-129721 1250805470/10000226pPŠX==}"÷™&IDMAP/GID2SID/10000225 1250805470/S-1-5-21-1834383793-1770918451-929701000-129664p×X>‹I|ÿ™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-129664 1250805470/10000225pµX==§…j™&IDMAP/GID2SID/10000224 1250805470/S-1-5-21-1834383793-1770918451-929701000-129660p¢X>‹‰ák™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-129660 1250805470/10000224pHhX==ÑèÝ™&IDMAP/GID2SID/10000223 1251826514/S-1-5-21-1834383793-1770918451-929701000-148396pÀ³X>é,™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-148396 1251826514/10000223pжX==ûKQ™&IDMAP/GID2SID/10000222 1250805470/S-1-5-21-1834383793-1770918451-929701000-107766ph@X>7ã̘™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-107766 1250805470/10000222p kX<=%¯Ä™&IDMAP/GID2SID/10000221 1250805470/S-1-5-21-1834383793-1770918451-929701000-79444BpÀ„X=/ºß™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-79444 1250805470/10000221BpX<=O8™&IDMAP/GID2SID/10000220 1250805470/S-1-5-21-1834383793-1770918451-929701000-79394BpÌ X=jOØ5™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-79394 1250805470/10000220Bpp¹X<î0 ™&IDMAP/GID2SID/10000219 1250805470/S-1-5-21-1834383793-1770918451-929701000-95350Bp žX=ºB¸™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-95350 1250805470/10000219BpPºX=”€™&IDMAP/GID2SID/10000218 1251822639/S-1-5-21-1834383793-1770918451-929701000-141420p@½X>3@è™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-141420 1251822639/10000218pÄX<B÷ó™&IDMAP/GID2SID/10000217 1251132054/S-1-5-21-1834383793-1770918451-929701000-42589Bpˆ³X=>ü|™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-42589 1251132054/10000217Bp¼X<lZg™&IDMAP/GID2SID/10000216 1251824032/S-1-5-21-1834383793-1770918451-929701000-42591Bp 3X=ê¦Ûj™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-42591 1251824032/10000216Bp„X=–½Ú™&IDMAP/GID2SID/10000215 1250538396/S-1-5-21-1834383793-1770918451-929701000-148315pTX=¥R™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-36648 1251822591/10000121pнX=À N™&IDMAP/GID2SID/10000214 1251822639/S-1-5-21-1834383793-1770918451-929701000-113149pèáX>/.†ï™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-113149 1251822639/10000214pìƒX=êƒÁ™&IDMAP/GID2SID/10000213 1250805470/S-1-5-21-1834383793-1770918451-929701000-107779pÀ«X>·(¨ª™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-107779 1250805470/10000213p¿X=ç4™&IDMAP/GID2SID/10000212 1251135589/S-1-5-21-1834383793-1770918451-929701000-129587ph»X>ߊeí™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-129587 1251135589/10000212p ¬X<}¸+§™&IDMAP/UID2SID/10000064 1251824050/S-1-5-21-1834383793-1770918451-929701000-75911BpXX=òŽM™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-75911 1251824050/10000064Bp uX=}ŽÈ3™&IDMAP/UID2SID/10000065 1249586553/S-1-5-21-1834383793-1770918451-929701000-143853p`X>Ëþ—õ™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-143853 1249586553/10000065p€´X=M…3™&IDMAP/GID2SID/10000253 
1250729595/S-1-5-21-1834383793-1770918451-929701000-148477p¼èX>³›Ò™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-148477 1250729595/10000253p„ÌX<M¯z¦™&IDMAP/GID2SID/10000252 1246408046/S-1-5-21-1834383793-1770918451-929701000-36764Bp}X=fBþ ™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-36764 1246408046/10000252Bpx|X=}ŸÙ™&IDMAP/UID2SID/10000068 1246408046/S-1-5-21-1834383793-1770918451-929701000-148476pXžX>“Ñ™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-148476 1246408046/10000068p¸X:MÙÝ™&IDMAP/GID2SID/10000251 1246407434/S-1-5-21-1834383793-1770918451-929701000-512BBBphŸX;ÔÑW™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-512 1246407434/10000251BBBpX= xl™&IDMAP/GID2SID/10000097 1251309681/S-1-5-21-1834383793-1770918451-929701000-126402p`X>35ql™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-126402 1251309681/10000097p8¹X= ¢ß™&IDMAP/GID2SID/10000096 1251309681/S-1-5-21-1834383793-1770918451-929701000-126401p¨¹X>3…Š™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-126401 1251309681/10000096pÜX= «9™&IDMAP/GID2SID/10000093 1251309681/S-1-5-21-1834383793-1770918451-929701000-126399pX‡X>­$™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-126399 1251309681/10000093pX= tq ™&IDMAP/GID2SID/10000091 1251822074/S-1-5-21-1834383793-1770918451-929701000-119075pÔX>ՇΙ&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-119075 1251822074/10000091pX=]gVÜ™&IDMAP/GID2SID/10000088 1250731374/S-1-5-21-1834383793-1770918451-929701000-107716pPÎX>·×)™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-107716 1250731374/10000088pŒX=]å6™&IDMAP/GID2SID/10000085 1251309681/S-1-5-21-1834383793-1770918451-929701000-119293pIX>[S;Ù™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-119293 1251309681/10000085p6X= ÌäR™&IDMAP/GID2SID/10000095 1251822639/S-1-5-21-1834383793-1770918451-929701000-143843pPfX>㡯™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-143843 1251822639/10000095p¸µX=ÍÑÆŠ™&IDMAP/UID2SID/10000054 1250785101/S-1-5-21-1834383793-1770918451-929701000-148281pøžX>C刭™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-148281 1250785101/10000054pX=ͧc™&IDMAP/UID2SID/10000055 1246406429/S-1-5-21-1834383793-1770918451-929701000-107798pôX> qo™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-107798 1246406429/10000055p`pX=}:M™&IDMAP/UID2SID/10000067 1250012412/S-1-5-21-1834383793-1770918451-929701000-143891p ÜX>Ëtg¸™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-143891 1250012412/10000067p _X<MA™&IDMAP/GID2SID/10000250 1251825058/S-1-5-21-1834383793-1770918451-929701000-46573BpˆtX=’ž£\™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-46573 1251825058/10000250Bp8ŸX<¢_b™&IDMAP/GID2SID/10000249 1251153193/S-1-5-21-1834383793-1770918451-929701000-38052Op¨)X=ºÌÒ™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-38052 1251153193/100002496pX6X<}deÀ™&IDMAP/UID2SID/10000066 1251148458/S-1-5-21-1834383793-1770918451-929701000-79085BpðŒX=ÝÌð™&IDMAP/GID2SID/10000209 1250949070/S-1-5-21-1834383793-1770918451-929701000-137093p¸X>àÍ™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-137093 1250949070/10000209pÀ<X=Ý1/d™&IDMAP/GID2SID/10000208 1250949070/S-1-5-21-1834383793-1770918451-929701000-127288pxX>Û@¢™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-127288 1250949070/10000208p„ X=Ý[’×™&IDMAP/GID2SID/10000207 1250949070/S-1-5-21-1834383793-1770918451-929701000-137092pÜÙX>08h™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-137092 1250949070/10000207peX=Ý…õJ™&IDMAP/GID2SID/10000206 
1250949070/S-1-5-21-1834383793-1770918451-929701000-127289pH¼X>Û='™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-127289 1250949070/10000206p˜X<ݯX¾™&IDMAP/GID2SID/10000205 1250949070/S-1-5-21-1834383793-1770918451-929701000-91520BpЙX=¶•Ù™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-91520 1250949070/10000205BpÀfX<ÝÙ»1™&IDMAP/GID2SID/10000204 1250949070/S-1-5-21-1834383793-1770918451-929701000-88064BpHµX=fútœ™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-88064 1250949070/10000204Bp gX<Ý¥™&IDMAP/GID2SID/10000203 1250949070/S-1-5-21-1834383793-1770918451-929701000-70905BpÈ X=Þ}™ç™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-70905 1250949070/10000203BpÀ{X=Ý-‚™&IDMAP/GID2SID/10000202 1250949070/S-1-5-21-2140803266-1626024873-1299147156-24034p( X>²ºu ™&IDMAP/SID2GID/S-1-5-21-2140803266-1626024873-1299147156-24034 1250949070/10000202p°xX=ÝWå‹™&IDMAP/GID2SID/10000201 1250949070/S-1-5-21-2106152344-1726899929-2013803672-44720pø#X> 7ç™&IDMAP/SID2GID/S-1-5-21-2106152344-1726899929-2013803672-44720 1250949070/10000201pX=ÝHÿ™&IDMAP/GID2SID/10000200 1250949070/S-1-5-21-2140803266-1626024873-1299147156-34812p0!X>½Ž™&IDMAP/SID2GID/S-1-5-21-2140803266-1626024873-1299147156-34812 1250949070/10000200pàlX= $غ™&IDMAP/GID2SID/10000199 1250949070/S-1-5-21-2140803266-1626024873-1299147156-25445p ¢X>â†H=™&IDMAP/SID2GID/S-1-5-21-2140803266-1626024873-1299147156-25445 1250949070/10000199pX= N;.™&IDMAP/GID2SID/10000198 1250949070/S-1-5-21-1834383793-1770918451-929701000-153031p`™X>?¼™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-153031 1250949070/10000198pÌ$X= xž¡™&IDMAP/GID2SID/10000197 1250949070/S-1-5-21-2140803266-1626024873-1299147156-37994pð´X>¾ÏT™&IDMAP/SID2GID/S-1-5-21-2140803266-1626024873-1299147156-37994 1250949070/10000197pðÌX= ¢™&IDMAP/GID2SID/10000196 1250949070/S-1-5-21-2140803266-1626024873-1299147156-34810p¨"X>]Á8™&IDMAP/SID2GID/S-1-5-21-2140803266-1626024873-1299147156-34810 1250949070/10000196p@qX= Ìdˆ™&IDMAP/GID2SID/10000195 1250949070/S-1-5-21-1834383793-1770918451-929701000-127557pøMX>_ê/©™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-127557 1250949070/10000195pl%X= öÇû™&IDMAP/GID2SID/10000194 1250949070/S-1-5-21-2140803266-1626024873-1299147156-21488p¸,X>âlÙ&IDMAP/SID2GID/S-1-5-21-2140803266-1626024873-1299147156-21488 1250949070/10000194pX< +o™&IDMAP/GID2SID/10000193 1250949070/S-1-5-21-1834383793-1770918451-929701000-68182BpðŸX=¾RÒL™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-68182 1250949070/10000193BpŒX< JŽâ™&IDMAP/GID2SID/10000192 1250949070/S-1-5-21-1834383793-1770918451-929701000-70902BpÀŠX=^Ý#>™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-70902 1250949070/10000192BpX= tñU™&IDMAP/GID2SID/10000191 1250949070/S-1-5-21-1834383793-1770918451-929701000-123200pÅX>ÛaQ,™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-123200 1250949070/10000191p|AX< žTÉ™&IDMAP/GID2SID/10000190 1250949070/S-1-5-21-1834383793-1770918451-929701000-45011BpßX=ŠÙr™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-45011 1250949070/10000190Bp,$X<]=sž™&IDMAP/GID2SID/10000189 1250949070/S-1-5-21-1834383793-1770918451-929701000-90536BpÀX=bs™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-90536 1250949070/10000189Bpð7X=]gÖ™&IDMAP/GID2SID/10000188 1250949070/S-1-5-21-1834383793-1770918451-929701000-122058p8õX>z¸ç™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-122058 1250949070/10000188pX=]‘9…™&IDMAP/GID2SID/10000187 
1250949070/S-1-5-21-2140803266-1626024873-1299147156-23996pÈÛX>¾b±š™&IDMAP/SID2GID/S-1-5-21-2140803266-1626024873-1299147156-23996 1250949070/10000187pX;]»œø™&IDMAP/GID2SID/10000186 1250949070/S-1-5-21-4186143834-2626045635-1021053583-519BBpx”X<ÚS8Å™&IDMAP/SID2GID/S-1-5-21-4186143834-2626045635-1021053583-519 1250949070/10000186BBpø’X<]åÿk™&IDMAP/GID2SID/10000185 1250949070/S-1-5-21-4186143834-2626045635-1021053583-1651Bp€¦X=Ï6¯ ™&IDMAP/SID2GID/S-1-5-21-4186143834-2626045635-1021053583-1651 1250949070/10000185Bp8ÏX<]cß™&IDMAP/GID2SID/10000184 1250949070/S-1-5-21-4186143834-2626045635-1021053583-7620BplÙX=KT¾ç™&IDMAP/SID2GID/S-1-5-21-4186143834-2626045635-1021053583-7620 1250949070/10000184BpØ“X;]9ÆR™&IDMAP/GID2SID/10000183 1250949070/S-1-5-21-4186143834-2626045635-1021053583-513BBpІX<Òù š™&IDMAP/SID2GID/S-1-5-21-4186143834-2626045635-1021053583-513 1250949070/10000183BBpÆX<}⎙&IDMAP/UID2SID/10000063 1250949070/S-1-5-21-4186143834-2626045635-1021053583-1183BpDÁX=;vÊE™&IDMAP/SID2UID/S-1-5-21-4186143834-2626045635-1021053583-1183 1250949070/10000063BppRP,¾}8•™&SAF/DOMAIN/MTVNE.AD.VIACOM.COM 1251229469/NEW-BRDC01.mtvne.ad.viacom.comh@…D,Ugaô™&SAF/DOMAIN/MTVNE 1251229469/NEW-BRDC01.mtvne.ad.viacom.com:0\¨²D'}Rª™&SAF/DOMAIN/AD.VIACOM.COM 1251229419/musselbeach.ad.viacom.com\|ãD"0({x™&NBT/UNITYNYAD01.IPT.VIACOM.COM#20 1250705340/172.21.200.27:0B\ÔÇX=}6U™&IDMAP/UID2SID/10000061 1245280903/S-1-5-21-1834383793-1770918451-929701000-148440p°©X>›Ò7Ê™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-148440 1245280903/10000061p0.X=­Ô7Ü™&IDMAP/GID2SID/10000176 1250795400/S-1-5-21-1834383793-1770918451-929701000-148426pì3X>3`ðý™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-148426 1250795400/10000176pœ2X=­þšO™&IDMAP/GID2SID/10000175 1251822591/S-1-5-21-1834383793-1770918451-929701000-148427phóX>3×b™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-148427 1251822591/10000175p rX=­Ra6™&IDMAP/GID2SID/10000173 1251823334/S-1-5-21-1834383793-1770918451-929701000-107054p@¢X>º'º™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-107054 1251823334/10000173p\‚X=­|Ä©™&IDMAP/GID2SID/10000172 1251823334/S-1-5-21-2140803266-1626024873-1299147156-20439pdÇX>bÜê™&IDMAP/SID2GID/S-1-5-21-2140803266-1626024873-1299147156-20439 1251823334/10000172pÜ>X<­¦'™&IDMAP/GID2SID/10000171 1251822594/S-1-5-21-4186143834-2626045635-1021053583-7668Bp¸X=ûæ\™&IDMAP/SID2GID/S-1-5-21-4186143834-2626045635-1021053583-7668 1251822594/10000171Bpœ9X<­ÐŠ™&IDMAP/GID2SID/10000170 1251823334/S-1-5-21-4186143834-2626045635-1021053583-7638BpàX=÷9™&IDMAP/SID2GID/S-1-5-21-4186143834-2626045635-1021053583-7638 1251823334/10000170Bpœ@X=ýo©e™&IDMAP/GID2SID/10000169 1251823334/S-1-5-21-1834383793-1770918451-929701000-117589plEX>ßêΙ&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-117589 1251823334/10000169p |X<ý™ Ù™&IDMAP/GID2SID/10000168 1251823334/S-1-5-21-1834383793-1770918451-929701000-90130BpˆMX=bχ™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-90130 1251823334/10000168Bp\BX=ýÃoL™&IDMAP/GID2SID/10000167 1251823334/S-1-5-21-1834383793-1770918451-929701000-102786pUX>7N;\™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-102786 1251823334/10000167ptÊX=ýíÒ¿™&IDMAP/GID2SID/10000166 1251823334/S-1-5-21-1834383793-1770918451-929701000-117590p´X>_ð%™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-117590 1251823334/10000166pDX<ý63™&IDMAP/GID2SID/10000165 1251823334/S-1-5-21-1834383793-1770918451-929701000-23125BpX€X=69Ìy™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-23125 
1251823334/10000165Bp‰X<}`¸t™&IDMAP/UID2SID/10000060 1251823334/S-1-5-21-1834383793-1770918451-929701000-32616BpÀ¥X=r´# ™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-32616 1251823334/10000060pˆƒD-úÊVd™&SAF/DOMAIN/NETDEV 1251229765/NETDEVROOTDC1.netdev.viacom.como\€X2U®ì™&NBT/PARAMOUNT.AD.VIACOM.COM#1C 1251229229/166.77.172.94:389,166.77.172.108:389-Bpl0L!ã§ ™&NBT/VIACOM_CORP.AD.VIACOM.COM#1C 1244247425/166.77.86.15:389com.comCdЯT+#fIÇ™&AD_SITENAME/DOMAIN/PARAMOUNT.AD.VIACOM.COM 4294967295/US-California-Burbank9:l¸›D#Ö«²5™&AD_SITENAME/DOMAIN/PARAMOUNT 4294967295/US-California-Burbank\À}áS«fæþÙNBT/MTVNE.AD.VIACOM.COM#4@©X32e½™&NBT/MTVNASIA.AD.VIACOM.COM#1C 1251229229/166.77.172.140:389,166.77.172.115:389ompÈkX#1ÿ2™¬™&SAF/DOMAIN/PARAMOUNT.AD.VIACOM.COM 1251229469/bubblebuddy.paramount.ad.viacom.compÍT(ïÓ ÷™&NBT/SQUILLIAM.MTVNASIA.AD.VIACOM.COM#20 1251229216/166.77.172.140:0acom.coml8¤T(‹£Ï×™&NBT/MISSTUFTSY.HOSTING.AD.VIACOM.COM#20 1251229217/166.77.173.152:0com.com1lT*#÷6½™&AD_SITENAME/DOMAIN/MTVNASIA.AD.VIACOM.COM 4294967295/US-California-Burbank9.2l°°D#í3à™&AD_SITENAME/DOMAIN/MTVNASIA 4294967295/US-California-Burbank\`¨0e÷ê™&IDMAP/SID2GID/S-1-5-32-546 1251227603/-1BHмL-xÄ¡™&SAF/DOMAIN/MTVN 1251234708/perchperkins.mtvn.ad.viacom.com.128:0mBBdXX=í·Íš™&IDMAP/GID2SID/10000533 1249523687/S-1-5-21-2140803266-1626024873-1299147156-34227pìñX>Š ³™&IDMAP/SID2GID/S-1-5-21-2140803266-1626024873-1299147156-34227 1249523687/10000533phNX=íá0™&IDMAP/GID2SID/10000532 1249523687/S-1-5-21-2140803266-1626024873-1299147156-37778pP X>fäè™&IDMAP/SID2GID/S-1-5-21-2140803266-1626024873-1299147156-37778 1249523687/10000532ppX=í ”™&IDMAP/GID2SID/10000531 1249523687/S-1-5-21-2140803266-1626024873-1299147156-19855pÝX>£é#™&IDMAP/SID2GID/S-1-5-21-2140803266-1626024873-1299147156-19855 1249523687/10000531p¬×X=í5÷ô™&IDMAP/GID2SID/10000530 1249523687/S-1-5-21-2140803266-1626024873-1299147156-34231pìãX> _9™&IDMAP/SID2GID/S-1-5-21-2140803266-1626024873-1299147156-34231 1249523687/10000530pøX==ÕÊ™&IDMAP/GID2SID/10000529 1249523687/S-1-5-21-2140803266-1626024873-1299147156-34228pýX>Џ†™&IDMAP/SID2GID/S-1-5-21-2140803266-1626024873-1299147156-34228 1249523687/10000529p(X==ÿx=™&IDMAP/GID2SID/10000528 1249523686/S-1-5-21-2140803266-1626024873-1299147156-34229plîX>Šhm}™&IDMAP/SID2GID/S-1-5-21-2140803266-1626024873-1299147156-34229 1249523686/10000528p°X==)ܰ™&IDMAP/GID2SID/10000527 1249523686/S-1-5-21-2140803266-1626024873-1299147156-34230pÈdX> nxÔ™&IDMAP/SID2GID/S-1-5-21-2140803266-1626024873-1299147156-34230 1249523686/10000527p<×X==S?$™&IDMAP/GID2SID/10000526 1249523685/S-1-5-21-1834383793-1770918451-929701000-136405pTÄX>3Eé™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-136405 1249523685/10000526p,éX=B÷^™&IDMAP/GID2SID/10000417 1249523685/S-1-5-21-1834383793-1770918451-929701000-160163p ãX>/ycx™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-160163 1249523685/10000417p<åX==}¢—™&IDMAP/GID2SID/10000525 1249523685/S-1-5-21-1834383793-1770918451-929701000-136403pXÍX>3å™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-136403 1249523685/10000525pÀ5X==§ ™&IDMAP/GID2SID/10000524 1250949070/S-1-5-21-1834383793-1770918451-929701000-136402p,X>35ܺ™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-136402 1250949070/10000524pÜîX==Ñh~™&IDMAP/GID2SID/10000523 1249523685/S-1-5-21-1834383793-1770918451-929701000-136406pX>3õvN™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-136406 1249523685/10000523p¸ÑX<=ûËñ™&IDMAP/GID2SID/10000522 
1249523684/S-1-5-21-1834383793-1770918451-929701000-64158Bp¸íX=ºæÅP™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-64158 1249523684/10000522BpˆFX==%/e™&IDMAP/GID2SID/10000521 1249523684/S-1-5-21-1834383793-1770918451-929701000-136394püíX>=ñy™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-136394 1249523684/10000521pˆ‰X==O’Ø™&IDMAP/GID2SID/10000520 1249523684/S-1-5-21-1834383793-1770918451-929701000-136295p\äX>[³¾×™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-136295 1249523684/10000520päX=–½E™&IDMAP/GID2SID/10000415 1249523684/S-1-5-21-1834383793-1770918451-929701000-136410pìHX>³ 6Ô™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-136410 1249523684/10000415ppX=î°­™&IDMAP/GID2SID/10000519 1249523684/S-1-5-21-1834383793-1770918451-929701000-136296ppX>[c¥<™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-136296 1249523684/10000519p|êX=êƒ,™&IDMAP/GID2SID/10000413 1249523684/S-1-5-21-1834383793-1770918451-929701000-136409p°ÿX>3+}™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-136409 1249523684/10000413pxqX<!™&IDMAP/GID2SID/10000518 1249523684/S-1-5-21-1834383793-1770918451-929701000-90338Bp°X=b{;™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-90338 1249523684/10000518BpÈ/X=Bw”™&IDMAP/GID2SID/10000517 1249523684/S-1-5-21-1834383793-1770918451-929701000-136404pAX>3•©„™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-136404 1249523684/10000517p¼ïX=lÚ™&IDMAP/GID2SID/10000516 1249523683/S-1-5-21-1834383793-1770918451-929701000-143490p€ýX>³¶ºÓ™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-143490 1249523683/10000516pÐ*X=–={™&IDMAP/GID2SID/10000515 1250949070/S-1-5-21-1834383793-1770918451-929701000-136401p¬X>3…õU™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-136401 1250949070/10000515pxáX=À î™&IDMAP/GID2SID/10000514 1249523683/S-1-5-21-1834383793-1770918451-929701000-140136pÐþX>¯èË`™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-140136 1249523683/10000514p8lX=êb™&IDMAP/GID2SID/10000513 1249523683/S-1-5-21-1834383793-1770918451-929701000-128030pØNX>tÊ™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-128030 1249523683/10000513p0X=gÕ™&IDMAP/GID2SID/10000512 1249523683/S-1-5-21-1834383793-1770918451-929701000-116610pÔûX> ~’E™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-116610 1249523683/10000512pp-X=>ÊH™&IDMAP/GID2SID/10000511 1249523683/S-1-5-21-1834383793-1770918451-929701000-128031pð X>?[/™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-128031 1249523683/10000511pHX=h-¼™&IDMAP/GID2SID/10000510 1249523682/S-1-5-21-1834383793-1770918451-929701000-127548pH,X>ßdï*™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-127548 1249523682/10000510pX=ÝL‘™&IDMAP/GID2SID/10000509 1249523682/S-1-5-21-1834383793-1770918451-929701000-140137ppIX>¯˜²Å™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-140137 1249523682/10000509pðX<Ý1¯™&IDMAP/GID2SID/10000508 1249523682/S-1-5-21-1834383793-1770918451-929701000-56310BpèxX= üI ™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-56310 1249523682/10000508BpÐX=Ý[x™&IDMAP/GID2SID/10000507 1249523682/S-1-5-21-1834383793-1770918451-929701000-127549p<óX>ßÖ™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-127549 1249523682/10000507p¨zX=Ý…uë™&IDMAP/GID2SID/10000506 1249523682/S-1-5-21-1834383793-1770918451-929701000-119975p8ñX>Üj™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-119975 1249523682/10000506pà X=ݯØ^™&IDMAP/GID2SID/10000505 
1249523682/S-1-5-21-1834383793-1770918451-929701000-105139ph÷X>¯ø³X™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-105139 1249523682/10000505p¨ÀX<ÝÙ;Ò™&IDMAP/GID2SID/10000504 1249523681/S-1-5-21-1834383793-1770918451-929701000-57905Bp¸ßX=ÞÍŸ§™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-57905 1249523681/10000504BpئX=ÝŸE™&IDMAP/GID2SID/10000503 1249523681/S-1-5-21-1834383793-1770918451-929701000-116608pØ@X>‹È ‰™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-116608 1249523681/10000503pœðX=Ý-¹™&IDMAP/GID2SID/10000502 1249523681/S-1-5-21-1834383793-1770918451-929701000-119974pè†X>,„©™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-119974 1249523681/10000502pˆöX=ÝWe,™&IDMAP/GID2SID/10000501 1249523681/S-1-5-21-1834383793-1770918451-929701000-140124pð)X>/S׳™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-140124 1249523681/10000501p°X<Á~›™&IDMAP/UID2SID/10000345 1249523680/S-1-5-21-1834383793-1770918451-929701000-64298BpøÁX=Òôë™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-64298 1249523680/10000345BpáX=Ýٻƙ&IDMAP/UID2SID/10000082 1251221751/S-1-5-21-1834383793-1770918451-929701000-129642pPÇX>ó¥ÁÒ™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-129642 1251221751/10000082pX=]Ò$C™&IDMAP/UID2SID/10000307 1250630963/S-1-5-21-1834383793-1770918451-929701000-105272pFX>Ã_|™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-105272 1250630963/10000307pHíX=-÷a™&IDMAP/UID2SID/10000278 1249520768/S-1-5-21-1834383793-1770918451-929701000-129577p(ùX>Ç|Ÿm™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-129577 1249520768/10000278pPZ$Ö]­™&IDMAP/GID2SID/99 1251227602/-<ØåX=M1Q·™&IDMAP/GID2SID/10000455 1251825059/S-1-5-21-2140803266-1626024873-1299147156-32221pPˆX>Šèw»™&IDMAP/SID2GID/S-1-5-21-2140803266-1626024873-1299147156-32221 1251825059/10000455pX<M[´*™&IDMAP/GID2SID/10000454 1250200846/S-1-5-21-1834383793-1770918451-929701000-61975Bp¨ÜX=’¹A5™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-61975 1250200846/10000454BpìùX=M…ž™&IDMAP/GID2SID/10000453 1250200846/S-1-5-21-1834383793-1770918451-929701000-126290p¸ÃX>[CÒ™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-126290 1250200846/10000453pØÂX<h­†™&IDMAP/GID2SID/10000410 1251837380/S-1-5-21-1834383793-1770918451-929701000-80133BpˆÖX=â þ™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-80133 1251837380/10000410BpPX<M¯z™&IDMAP/GID2SID/10000452 1250200846/S-1-5-21-1834383793-1770918451-929701000-31583BplçX=>€2©™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-31583 1250200846/10000452BpÐX<MÙÝ„™&IDMAP/GID2SID/10000451 1251826461/S-1-5-21-1834383793-1770918451-929701000-95336Bpl>X=bÊü™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-95336 1251826461/10000451Bp(IH 6ù^ð™&TDOM/HOSTING 1250633849/S-1-5-21-656259101-2991933430-3416489241B`L74ðÙé™&TDOM/PARAMOUNT 1250633849/S-1-5-21-3692268541-1264201430-3503654325BBdÐ1H3³¡íÅ™&TDOM/HARMONIX 1250633849/S-1-5-21-73586283-776561741-725345543BBB`xÚH 7Ê•{á™&TDOM/MTVNE 1250633849/S-1-5-21-2106152344-1726899929-2013803672BB`L7}Þ]µ™&TDOM/MTVNASIA 1250633849/S-1-5-21-1736519922-1920428879-1691616715BBBdÐFH6ÿرG™&TDOM/PLAYASUR 1250633849/S-1-5-21-2129485696-1739684832-945835055` üL7ªÒ~™&TDOM/VIACOM_CORP 1250633849/S-1-5-21-2140803266-1626024873-1299147156d@ND7S[ý™&TDOM/AD 1250633849/S-1-5-21-4186143834-2626045635-1021053583B\P5X=>J¨™&IDMAP/GID2SID/10000211 1249517309/S-1-5-21-1834383793-1770918451-929701000-107951p˜4X>±VË™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-107951 
1249517309/10000211pX=m†S˜™&IDMAP/UID2SID/10000337 1249517309/S-1-5-21-1834383793-1770918451-929701000-119188p°$X>—{ÝJ™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-119188 1249517309/10000337pèJX=m\ð$™&IDMAP/UID2SID/10000338 1249517309/S-1-5-21-1834383793-1770918451-929701000-119107p4X>—½Ì™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-119107 1249517309/10000338p°9X=ëá™&IDMAP/UID2SID/10000344 1249435951/S-1-5-21-1834383793-1770918451-929701000-148518pØ×X>ÇëO™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-148518 1249435951/10000344p"X=E‚™&IDMAP/UID2SID/10000343 1250273007/S-1-5-21-1834383793-1770918451-929701000-148361p¾X>ï³Sî™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-148361 1250273007/10000343p ºX= Й&IDMAP/UID2SID/10000215 1250725628/S-1-5-21-1834383793-1770918451-929701000-141235pÐ?X>Ù¿3™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-141235 1250725628/10000215pð(¦)µS™&IDMAP/UID2SID/25238 1248799054/-B@?X=Í}™&IDMAP/UID2SID/10000256 1250639960/S-1-5-21-1834383793-1770918451-929701000-107870pØÉX>ËY‰™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-107870 1250639960/10000256pÐX<mÚ™&IDMAP/UID2SID/10000335 1250016356/S-1-5-21-1834383793-1770918451-929701000-95298BpPÕX=Òü°ƒ™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-95298 1250016356/10000335BpøX<]~^\™&IDMAP/UID2SID/10000309 1249319817/S-1-5-21-1834383793-1770918451-929701000-79512Bp˜üX=r>á4™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-79512 1249319817/10000309Bp8ãX= PF™&IDMAP/UID2SID/10000315 1250546951/S-1-5-21-1834383793-1770918451-929701000-143917p,âX>w"¯™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-143917 1250546951/10000315p©X< ¹‰_™&IDMAP/UID2SID/10000317 1251151498/S-1-5-21-1834383793-1770918451-929701000-95238BpðÐX=Ê¢Y™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-95238 1251151498/10000317BpùX=m‚¦L™&IDMAP/UID2SID/10000331 1251331910/S-1-5-21-1834383793-1770918451-929701000-148513pÐ’X>Ç{V™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-148513 1251331910/10000331pÀ–X=]PN™&IDMAP/UID2SID/10000304 1251824050/S-1-5-21-1834383793-1770918451-929701000-141063p>X>ëf5™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-141063 1251824050/10000304pØ$X< ß?‡™&IDMAP/UID2SID/10000310 1250714401/S-1-5-21-1834383793-1770918451-929701000-50388Bp (X=&cø™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-50388 1250714401/10000310Bp¨aX=m¬ À™&IDMAP/UID2SID/10000330 1250878988/S-1-5-21-1834383793-1770918451-929701000-148506p %X>GV¢™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-148506 1250878988/10000330p ïX=½›A0™&IDMAP/UID2SID/10000321 1251156944/S-1-5-21-1834383793-1770918451-929701000-143923pÈÍX>÷—;S™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-143923 1251156944/10000321pÔX=½K(•™&IDMAP/UID2SID/10000329 1250273129/S-1-5-21-1834383793-1770918451-929701000-148435pOX> ’ß™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-148435 1250273129/10000329p@X=M³']™&IDMAP/GID2SID/10000458 1251149354/S-1-5-21-1834383793-1770918451-929701000-129629pŸX>‹ã`k™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-129629 1251149354/10000458pðÉX<½u‹™&IDMAP/UID2SID/10000328 1250810099/S-1-5-21-1834383793-1770918451-929701000-87280Bp0¥X=&¿ÀÍ™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-87280 1250810099/10000328Bpˆ¬X;½Ö™&IDMAP/UID2SID/10000324 1251146424/S-1-5-21-1834383793-1770918451-929701000-1480BBp`X<¡õMd™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-1480 1251146424/10000324BBpøëX=]c)1™&IDMAP/GID2SID/10000382 
1251157458/S-1-5-21-1834383793-1770918451-929701000-118513pÀ²X>_Tí™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-118513 1251157458/10000382p¨X=]Œ¤™&IDMAP/GID2SID/10000381 1248890662/S-1-5-21-1834383793-1770918451-929701000-129546p˜ËX>ßâû™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-129546 1248890662/10000381p ˆX=-u-»™&IDMAP/UID2SID/10000275 1251157458/S-1-5-21-1834383793-1770918451-929701000-119215p(¯X>Ã.0í™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-119215 1251157458/10000275p3X=ç4™&IDMAP/UID2SID/10000290 1251141635/S-1-5-21-1834383793-1770918451-929701000-129688pïX>󛯼™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-129688 1251141635/10000290p8X=½qÞ¼™&IDMAP/UID2SID/10000322 1251158331/S-1-5-21-1834383793-1770918451-929701000-148511p€X>ÇÂŒ™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-148511 1251158331/10000322p)X=½Å¤£™&IDMAP/UID2SID/10000320 1248824845/S-1-5-21-1834383793-1770918451-929701000-148375pP X>o©e™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-148375 1248824845/10000320pàX=]¤„™&IDMAP/UID2SID/10000302 1251826461/S-1-5-21-1834383793-1770918451-929701000-148494p(ËX>ž–Í™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-148494 1251826461/10000302p·X<êƒÁ™&IDMAP/UID2SID/10000291 1250717354/S-1-5-21-1834383793-1770918451-929701000-87529Bp°+X=žFª™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-87529 1250717354/10000291BpèX=ÜÛ™&IDMAP/GID2SID/10000440 1251224362/S-1-5-21-1834383793-1770918451-929701000-129604p(¶X>‹‘¬™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-129604 1251224362/10000440påX= ãìÒ™&IDMAP/UID2SID/10000316 1250644308/S-1-5-21-1834383793-1770918451-929701000-107715pø™X>Oƒ'™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-107715 1250644308/10000316pPãX= &ì™&IDMAP/UID2SID/10000318 1251141808/S-1-5-21-1834383793-1770918451-929701000-107850p€0X>Ëî·Â™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-107850 1251141808/10000318p˜çX= ‹y ™&IDMAP/UID2SID/10000312 1248801216/S-1-5-21-1834383793-1770918451-929701000-148504px°X>GöNØ™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-148504 1248801216/10000312p`FX=òxh™&IDMAP/GID2SID/10000441 1250632968/S-1-5-21-1834383793-1770918451-929701000-143927pxX>0uƒ™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-143927 1250632968/10000441pXRX==ûK¼™&IDMAP/GID2SID/10000422 1251822639/S-1-5-21-1834383793-1770918451-929701000-148502pŒX>ßn «™&IDMAP/SID2GID/S-1-5-21-1834383793-1770918451-929701000-148502 1251822639/10000422pÐ#X=B÷ó™&IDMAP/UID2SID/10000295 1250525522/S-1-5-21-1834383793-1770918451-929701000-148311pØ÷X>o¨~™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-148311 1250525522/10000295pøòX=½ó´÷™&IDMAP/UID2SID/10000125 1251141450/S-1-5-21-1834383793-1770918451-929701000-129580pРX>GâwŽ™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-129580 1251141450/10000125pxèX= µÜ™&IDMAP/UID2SID/10000311 1249660297/S-1-5-21-1834383793-1770918451-929701000-148288pŒØX>Cµ×o™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-148288 1249660297/10000311pHvX= 7³¹™&IDMAP/UID2SID/10000314 1250637001/S-1-5-21-1834383793-1770918451-929701000-148454p@ËX>Èù@™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-148454 1250637001/10000314pÐ8X=ÍÑÆõ™&IDMAP/UID2SID/10000254 1250697509/S-1-5-21-1834383793-1770918451-929701000-107733p¨ZX>Z$™&IDMAP/SID2UID/S-1-5-21-1834383793-1770918451-929701000-107733 1250697509/10000254p 3X< a-™&IDMAP/UID2SID/10000313 
[Binary data elided: this archive member is a deliberately corrupted TDB database shipped as test input. Its recoverable plaintext consists of winbindd cache records -- IDMAP/UID2SID, IDMAP/GID2SID, IDMAP/SID2UID and IDMAP/SID2GID mappings for several S-1-5-21-* domains, plus one NBT name-cache entry (NBT/NEW-BRDC01.MTVNE.AD.VIACOM.COM#20) -- interleaved with raw hash-table bytes and long runs of 'A' padding. The binary content runs up to the next archive member below.]
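A database like the one elided above cannot be read by an ordinary traverse; tdbdump's emergency mode (implemented further down in tools/tdbdump.c via tdb_rescue()) scans the file for records that are still intact instead. A minimal sketch, assuming the corrupt member is extracted as test/tdb.corrupt (the file name is an assumption) and the tools are on PATH:

    # Salvage whatever records are still intact from a corrupt database:
    tdbdump -e test/tdb.corrupt > salvaged.txt

    # Restrict the salvage to a single key, e.g. one of the IDMAP
    # mappings visible in the blob above:
    tdbdump -e -k "IDMAP/UID2SID/10000252" test/tdb.corrupt

Both options (-e, -k) are taken from the getopt string "hk:e" in tools/tdbdump.c below; output goes to stdout in the same key/data format as a normal dump.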
tdb-1.4.2/test/test_tdbbackup.sh0000770000000000000000000000220313444661620016541 0ustar rootroot00000000000000#!/bin/sh
# Blackbox test for tdbbackup of given ldb or tdb database
# Copyright (C) 2018 Andrew Bartlett

if [ $# -lt 1 ]; then
	echo "Usage: $0 LDBFILE"
	exit 1;
fi

LDBFILE=$1

timestamp()
{
	date -u +'time: %Y-%m-%d %H:%M:%S.%6NZ' | sed 's/\..*NZ$/.000000Z/'
}

subunit_fail_test ()
{
	timestamp
	printf 'failure: %s [\n' "$1"
	cat -
	echo "]"
}

testit ()
{
	name="$1"
	shift
	cmdline="$@"
	timestamp
	printf 'test: %s\n' "$name"
	output=`$cmdline 2>&1`
	status=$?
	if [ x$status = x0 ]; then
		timestamp
		printf 'success: %s\n' "$name"
	else
		echo "$output" | subunit_fail_test "$name"
	fi
	return $status
}

$BINDIR/tdbdump $LDBFILE | sort > orig_dump

testit "normal tdbbackup on tdb file" $BINDIR/tdbbackup $LDBFILE -s .bak
$BINDIR/tdbdump $LDBFILE.bak | sort > bak_dump
testit "cmp between tdbdumps of original and backup" cmp orig_dump bak_dump
rm $LDBFILE.bak
rm bak_dump

testit "readonly tdbbackup on tdb file" $BINDIR/tdbbackup $LDBFILE -s .bak -r
$BINDIR/tdbdump $LDBFILE.bak | sort > bak_dump
testit "cmp between tdbdumps of original and backup dbs" cmp orig_dump bak_dump
rm $LDBFILE.bak
rm bak_dump
rm orig_dump
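The harness above reads the binaries from $BINDIR and takes the database to exercise as its only argument. An illustrative invocation (both paths are assumptions, not from the original source) against an existing database file:

    # BINDIR must hold the tdbdump and tdbbackup binaries:
    BINDIR=/usr/local/samba/bin sh tdb-1.4.2/test/test_tdbbackup.sh /tmp/scratch.tdb

The subunit-style test:/success:/failure: lines it prints are what Samba's test runner consumes.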
tdb-1.4.2/tools/tdbbackup.c0000660000000000000000000002021113444661620015470 0ustar rootroot00000000000000/*
   Unix SMB/CIFS implementation.
   low level tdb backup and restore utility
   Copyright (C) Andrew Tridgell              2002

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

/*
  This program is meant for backup/restore of tdb databases. Typical usage
  would be:

     tdbbackup *.tdb

  when Samba shuts down cleanly, which will make a backup of all the local
  databases to *.bak files. Then on Samba startup you would use:

     tdbbackup -v *.tdb

  and this will check the databases for corruption and, if corruption is
  detected, restore from the backup.

  You may also like to do a backup on a regular basis while Samba is
  running, perhaps using cron.

  The reason this program is needed is to cope with power failures while
  Samba is running. A power failure could lead to database corruption and
  Samba will then not start correctly.

  Note that many of the databases in Samba are transient and thus don't
  need to be backed up, so you can optimise the above a little by only
  running the backup on the critical databases.
 */
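/*
  Illustrative only, not part of the original source: the cron-based
  backup suggested above could be a crontab entry like the following,
  with paths adjusted to the local installation (the paths here are
  assumptions):

      0 3 * * * /usr/local/samba/bin/tdbbackup -s .bak /usr/local/samba/private/*.tdb

  with a matching verify pass in the Samba init script:

      /usr/local/samba/bin/tdbbackup -v -s .bak /usr/local/samba/private/*.tdb

  The -s and -v options are those defined in usage() below.
*/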
#include "replace.h"
#include "system/locale.h"
#include "system/time.h"
#include "system/filesys.h"
#include "system/wait.h"
#include "tdb.h"

#ifdef HAVE_GETOPT_H
#include <getopt.h>
#endif

static int failed;

static struct tdb_logging_context log_ctx;

#ifdef PRINTF_ATTRIBUTE
static void tdb_log(struct tdb_context *tdb, enum tdb_debug_level level,
		    const char *format, ...) PRINTF_ATTRIBUTE(3,4);
#endif
static void tdb_log(struct tdb_context *tdb, enum tdb_debug_level level,
		    const char *format, ...)
{
	va_list ap;

	va_start(ap, format);
	vfprintf(stdout, format, ap);
	va_end(ap);
	fflush(stdout);
}

static char *add_suffix(const char *name, const char *suffix)
{
	char *ret;
	int len = strlen(name) + strlen(suffix) + 1;
	ret = (char *)malloc(len);
	if (!ret) {
		fprintf(stderr,"Out of memory!\n");
		exit(1);
	}
	snprintf(ret, len, "%s%s", name, suffix);
	return ret;
}

static int copy_fn(TDB_CONTEXT *tdb, TDB_DATA key, TDB_DATA dbuf, void *state)
{
	TDB_CONTEXT *tdb_new = (TDB_CONTEXT *)state;

	if (tdb_store(tdb_new, key, dbuf, TDB_INSERT) != 0) {
		fprintf(stderr,"Failed to insert into %s\n", tdb_name(tdb_new));
		failed = 1;
		return 1;
	}
	return 0;
}

static int test_fn(TDB_CONTEXT *tdb, TDB_DATA key, TDB_DATA dbuf, void *state)
{
	return 0;
}

/*
  carefully backup a tdb, validating the contents and only doing the
  backup if it's OK. This function is also used for restore.
*/
static int backup_tdb(const char *old_name, const char *new_name,
		      int hash_size, int nolock, bool readonly)
{
	TDB_CONTEXT *tdb;
	TDB_CONTEXT *tdb_new;
	char *tmp_name;
	struct stat st;
	int count1, count2;

	tmp_name = add_suffix(new_name, ".tmp");

	/* stat the old tdb to find its permissions */
	if (stat(old_name, &st) != 0) {
		perror(old_name);
		free(tmp_name);
		return 1;
	}

	/* open the old tdb */
	tdb = tdb_open_ex(old_name, 0,
			  TDB_DEFAULT | (nolock ? TDB_NOLOCK : 0),
			  O_RDWR, 0, &log_ctx, NULL);
	if (!tdb) {
		printf("Failed to open %s\n", old_name);
		free(tmp_name);
		return 1;
	}

	/* create the new tdb */
	unlink(tmp_name);
	tdb_new = tdb_open_ex(tmp_name,
			      hash_size ? hash_size : tdb_hash_size(tdb),
			      TDB_DEFAULT,
			      O_RDWR|O_CREAT|O_EXCL, st.st_mode & 0777,
			      &log_ctx, NULL);
	if (!tdb_new) {
		perror(tmp_name);
		free(tmp_name);
		return 1;
	}

	if (readonly) {
		if (tdb_lockall_read(tdb) != 0) {
			printf("Failed to obtain read only lock on old tdb\n");
			tdb_close(tdb);
			tdb_close(tdb_new);
			unlink(tmp_name);
			free(tmp_name);
			return 1;
		}
	} else if (tdb_transaction_start(tdb) != 0) {
		printf("Failed to start transaction on db\n");
		tdb_close(tdb);
		tdb_close(tdb_new);
		unlink(tmp_name);
		free(tmp_name);
		return 1;
	}

	/* lock the backup tdb so that nobody else can change it */
	if (tdb_lockall(tdb_new) != 0) {
		printf("Failed to lock backup tdb\n");
		tdb_close(tdb);
		tdb_close(tdb_new);
		unlink(tmp_name);
		free(tmp_name);
		return 1;
	}

	failed = 0;

	/* traverse and copy */
	if (readonly) {
		count1 = tdb_traverse_read(tdb, copy_fn, (void *)tdb_new);
	} else {
		count1 = tdb_traverse(tdb, copy_fn, (void *)tdb_new);
	}
	if (count1 < 0 || failed) {
		fprintf(stderr,"failed to copy %s\n", old_name);
		tdb_close(tdb);
		tdb_close(tdb_new);
		unlink(tmp_name);
		free(tmp_name);
		return 1;
	}

	/* close the old tdb */
	tdb_close(tdb);

	/* copy done, unlock the backup tdb */
	tdb_unlockall(tdb_new);

#ifdef HAVE_FDATASYNC
	if (fdatasync(tdb_fd(tdb_new)) != 0) {
#else
	if (fsync(tdb_fd(tdb_new)) != 0) {
#endif
		/* not fatal */
		fprintf(stderr, "failed to fsync backup file\n");
	}

	/* close the new tdb and re-open read-only */
	tdb_close(tdb_new);
	tdb_new = tdb_open_ex(tmp_name, 0, TDB_DEFAULT, O_RDONLY, 0,
			      &log_ctx, NULL);
	if (!tdb_new) {
		fprintf(stderr,"failed to reopen %s\n", tmp_name);
		unlink(tmp_name);
		perror(tmp_name);
		free(tmp_name);
		return 1;
	}

	/* traverse the new tdb to confirm */
	count2 = tdb_traverse(tdb_new, test_fn, NULL);
	if (count2 != count1) {
		fprintf(stderr,"failed to copy %s\n", old_name);
		tdb_close(tdb_new);
		unlink(tmp_name);
		free(tmp_name);
		return 1;
	}

	/* close the new tdb and rename it to .bak */
	tdb_close(tdb_new);
	if (rename(tmp_name, new_name) != 0) {
		perror(new_name);
		free(tmp_name);
		return 1;
	}

	free(tmp_name);
	return 0;
}

/*
  verify a tdb and if it is corrupt then restore from *.bak
*/
static int verify_tdb(const char *fname, const char *bak_name)
{
	TDB_CONTEXT *tdb;
	int count = -1;

	/* open the tdb */
	tdb = tdb_open_ex(fname, 0, 0, O_RDONLY, 0, &log_ctx, NULL);

	/* traverse the tdb, then close it */
	if (tdb) {
		count = tdb_traverse(tdb, test_fn, NULL);
		tdb_close(tdb);
	}

	/* count < 0 means an error */
	if (count < 0) {
		printf("restoring %s\n", fname);
		return backup_tdb(bak_name, fname, 0, 0, 0);
	}

	printf("%s : %d records\n", fname, count);
	return 0;
}
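/*
  Illustrative only, not part of the original source: the nolock and
  readonly arguments of backup_tdb() above are driven by the -l and -r
  options defined in usage() below, e.g.

      tdbbackup -l gencache.tdb    # open without locking, for mutex dbs
      tdbbackup -r locking.tdb     # read-only lockall instead of a
                                   # transaction on the source db

  The database names here are examples only; any tdb file works.
*/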
/*
  see if one file is newer than another
*/
static int file_newer(const char *fname1, const char *fname2)
{
	struct stat st1, st2;
	if (stat(fname1, &st1) != 0) {
		return 0;
	}
	if (stat(fname2, &st2) != 0) {
		return 1;
	}
	return (st1.st_mtime > st2.st_mtime);
}

static void usage(void)
{
	printf("Usage: tdbbackup [options] <fname...>\n\n");
	printf("   -h            this help message\n");
	printf("   -s suffix     set the backup suffix\n");
	printf("   -v            verify mode (restore if corrupt)\n");
	printf("   -n hashsize   set the new hash size for the backup\n");
	printf("   -l            open without locking to back up mutex dbs\n");
	printf("   -r            open with read only locking\n");
}

int main(int argc, char *argv[])
{
	int i;
	int ret = 0;
	int c;
	int verify = 0;
	int hashsize = 0;
	int nolock = 0;
	bool readonly = false;
	const char *suffix = ".bak";

	log_ctx.log_fn = tdb_log;

	while ((c = getopt(argc, argv, "vhs:n:lr")) != -1) {
		switch (c) {
		case 'h':
			usage();
			exit(0);
		case 'v':
			verify = 1;
			break;
		case 's':
			suffix = optarg;
			break;
		case 'n':
			hashsize = atoi(optarg);
			break;
		case 'l':
			nolock = 1;
			break;
		case 'r':
			readonly = true;
		}
	}

	argc -= optind;
	argv += optind;

	if (argc < 1) {
		usage();
		exit(1);
	}

	for (i=0; i<argc; i++) {
		const char *fname = argv[i];
		char *bak_name;

		bak_name = add_suffix(fname, suffix);

		if (verify) {
			if (verify_tdb(fname, bak_name) != 0) {
				ret = 1;
			}
		} else {
			if (file_newer(fname, bak_name) &&
			    backup_tdb(fname, bak_name, hashsize,
				       nolock, readonly) != 0) {
				ret = 1;
			}
		}

		free(bak_name);
	}

	return ret;
}
tdb-1.4.2/tools/tdbdump.c/*
   Unix SMB/CIFS implementation.
   simple tdb dump util
   Copyright (C) Andrew Tridgell              2001

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "replace.h"
#include "system/locale.h"
#include "system/time.h"
#include "system/filesys.h"
#include "system/wait.h"
#include "tdb.h"

static void print_data(TDB_DATA d)
{
	unsigned char *p = (unsigned char *)d.dptr;
	int len = d.dsize;
	while (len--) {
		if (isprint(*p) && !strchr("\"\\", *p)) {
			fputc(*p, stdout);
		} else {
			printf("\\%02X", *p);
		}
		p++;
	}
}

static int traverse_fn(TDB_CONTEXT *tdb, TDB_DATA key, TDB_DATA dbuf, void *state)
{
	printf("{\n");
	printf("key(%zu) = \"", key.dsize);
	print_data(key);
	printf("\"\n");
	printf("data(%zu) = \"", dbuf.dsize);
	print_data(dbuf);
	printf("\"\n");
	printf("}\n");
	return 0;
}

static void log_stderr(struct tdb_context *tdb, enum tdb_debug_level level,
		       const char *fmt, ...) PRINTF_ATTRIBUTE(3,4);

static void log_stderr(struct tdb_context *tdb, enum tdb_debug_level level,
		       const char *fmt, ...)
{
	va_list ap;
	const char *name = tdb_name(tdb);
	const char *prefix = "";

	if (!name)
		name = "unnamed";

	switch (level) {
	case TDB_DEBUG_ERROR:
		prefix = "ERROR: ";
		break;
	case TDB_DEBUG_WARNING:
		prefix = "WARNING: ";
		break;
	case TDB_DEBUG_TRACE:
		return;

	default:
	case TDB_DEBUG_FATAL:
		prefix = "FATAL: ";
		break;
	}

	va_start(ap, fmt);
	fprintf(stderr, "tdb(%s): %s", name, prefix);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

static void emergency_walk(TDB_DATA key, TDB_DATA dbuf, void *keyname)
{
	if (keyname) {
		if (key.dsize != strlen(keyname))
			return;
		if (memcmp(key.dptr, keyname, key.dsize) != 0)
			return;
	}
	traverse_fn(NULL, key, dbuf, NULL);
}

static int dump_tdb(const char *fname, const char *keyname, bool emergency)
{
	TDB_CONTEXT *tdb;
	TDB_DATA key, value;
	struct tdb_logging_context logfn = {
		.log_fn = log_stderr,
	};
	int tdb_flags = TDB_DEFAULT;

	/*
	 * Note: that O_RDONLY implies TDB_NOLOCK, but we want to make it
	 * explicit as it's important when working on databases which were
	 * created with mutex locking.
	 */
	tdb_flags |= TDB_NOLOCK;

	tdb = tdb_open_ex(fname, 0, tdb_flags, O_RDONLY, 0, &logfn, NULL);
	if (!tdb) {
		printf("Failed to open %s\n", fname);
		return 1;
	}

	if (emergency) {
		return tdb_rescue(tdb, emergency_walk, discard_const(keyname)) == 0;
	}
	if (!keyname) {
		return tdb_traverse(tdb, traverse_fn, NULL) == -1 ? 1 : 0;
	} else {
		key.dptr = discard_const_p(uint8_t, keyname);
		key.dsize = strlen(keyname);
		value = tdb_fetch(tdb, key);
		if (!value.dptr) {
			return 1;
		} else {
			print_data(value);
			free(value.dptr);
		}
	}

	return 0;
}

static void usage( void)
{
	printf( "Usage: tdbdump [options] <filename>\n\n");
	printf( "   -h          this help message\n");
	printf( "   -k keyname  dumps value of keyname\n");
	printf( "   -e          emergency dump, for corrupt databases\n");
}

int main(int argc, char *argv[])
{
	char *fname, *keyname=NULL;
	bool emergency = false;
	int c;

	if (argc < 2) {
		printf("Usage: tdbdump <fname>\n");
		exit(1);
	}

	while ((c = getopt( argc, argv, "hk:e")) != -1) {
		switch (c) {
		case 'h':
			usage();
			exit( 0);
		case 'k':
			keyname = optarg;
			break;
		case 'e':
			emergency = true;
			break;
		default:
			usage();
			exit( 1);
		}
	}

	fname = argv[optind];

	return dump_tdb(fname, keyname, emergency);
}
tdb-1.4.2/tools/tdbrestore.c0000660000000000000000000001013012553526140015702 0ustar rootroot00000000000000/*
   tdbrestore -- construct a tdb from tdbdump output.
Copyright (C) Volker Lendecke 2010 Copyright (C) Simon McVittie 2005 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . */ #include "replace.h" #include #include "system/locale.h" #include "system/time.h" #include "system/filesys.h" #include "system/wait.h" #include "tdb.h" static int read_linehead(FILE *f) { int i, c; int num_bytes; char prefix[128]; while (1) { c = getc(f); if (c == EOF) { return -1; } if (c == '(') { break; } } for (i=0; idptr = (unsigned char *)malloc(size); if (d->dptr == NULL) { return -1; } d->dsize = size; for (i=0; idptr[i] = (low|high); } else { d->dptr[i] = c; } } return 0; } static int swallow(FILE *f, const char *s, int *eof) { char line[128]; if (fgets(line, sizeof(line), f) == NULL) { if (eof != NULL) { *eof = 1; } return -1; } if (strcmp(line, s) != 0) { return -1; } return 0; } static int read_rec(FILE *f, TDB_CONTEXT *tdb, int *eof) { int length; TDB_DATA key, data; int ret = -1; key.dptr = NULL; data.dptr = NULL; if (swallow(f, "{\n", eof) == -1) { goto fail; } length = read_linehead(f); if (length == -1) { goto fail; } if (read_data(f, &key, length) == -1) { goto fail; } if (swallow(f, "\"\n", NULL) == -1) { goto fail; } length = read_linehead(f); if (length == -1) { goto fail; } if (read_data(f, &data, length) == -1) { goto fail; } if ((swallow(f, "\"\n", NULL) == -1) || (swallow(f, "}\n", NULL) == -1)) { goto fail; } if (tdb_store(tdb, key, data, TDB_INSERT) != 0) { fprintf(stderr, "TDB error: %s\n", tdb_errorstr(tdb)); goto fail; } ret = 0; fail: free(key.dptr); free(data.dptr); return ret; } static int restore_tdb(const char *fname) { TDB_CONTEXT *tdb; tdb = tdb_open(fname, 0, 0, O_RDWR|O_CREAT|O_EXCL, 0666); if (!tdb) { perror("tdb_open"); fprintf(stderr, "Failed to open %s\n", fname); return 1; } while (1) { int eof = 0; if (read_rec(stdin, tdb, &eof) == -1) { if (eof) { break; } return 1; } } if (tdb_close(tdb)) { fprintf(stderr, "Error closing tdb\n"); return 1; } return 0; } int main(int argc, char *argv[]) { char *fname; if (argc < 2) { printf("Usage: %s dbname < tdbdump_output\n", argv[0]); exit(1); } fname = argv[1]; return restore_tdb(fname); } tdb-1.4.2/tools/tdbtest.c0000660000000000000000000001223712406075657015221 0ustar rootroot00000000000000/* a test program for tdb - the trivial database */ #include "replace.h" #include "tdb.h" #include "system/filesys.h" #include "system/time.h" #include #define DELETE_PROB 7 #define STORE_PROB 5 static struct tdb_context *db; static GDBM_FILE gdbm; struct timeval tp1,tp2; static void _start_timer(void) { gettimeofday(&tp1,NULL); } static double _end_timer(void) { gettimeofday(&tp2,NULL); return((tp2.tv_sec - tp1.tv_sec) + (tp2.tv_usec - tp1.tv_usec)*1.0e-6); } static void fatal(const char *why) { perror(why); exit(1); } #ifdef PRINTF_ATTRIBUTE static void tdb_log(struct tdb_context *tdb, int level, const char *format, ...) PRINTF_ATTRIBUTE(3,4); #endif static void tdb_log(struct tdb_context *tdb, int level, const char *format, ...) 
{ va_list ap; va_start(ap, format); vfprintf(stdout, format, ap); va_end(ap); fflush(stdout); } static void compare_db(void) { TDB_DATA d, key, nextkey; datum gd, gkey, gnextkey; key = tdb_firstkey(db); while (key.dptr) { d = tdb_fetch(db, key); gkey.dptr = key.dptr; gkey.dsize = key.dsize; gd = gdbm_fetch(gdbm, gkey); if (!gd.dptr) fatal("key not in gdbm"); if (gd.dsize != d.dsize) fatal("data sizes differ"); if (memcmp(gd.dptr, d.dptr, d.dsize)) { fatal("data differs"); } nextkey = tdb_nextkey(db, key); free(key.dptr); free(d.dptr); free(gd.dptr); key = nextkey; } gkey = gdbm_firstkey(gdbm); while (gkey.dptr) { gd = gdbm_fetch(gdbm, gkey); key.dptr = gkey.dptr; key.dsize = gkey.dsize; d = tdb_fetch(db, key); if (!d.dptr) fatal("key not in db"); if (d.dsize != gd.dsize) fatal("data sizes differ"); if (memcmp(d.dptr, gd.dptr, gd.dsize)) { fatal("data differs"); } gnextkey = gdbm_nextkey(gdbm, gkey); free(gkey.dptr); free(gd.dptr); free(d.dptr); gkey = gnextkey; } } static char *randbuf(int len) { char *buf; int i; buf = (char *)malloc(len+1); for (i=0;i. */ #include "replace.h" #include "system/locale.h" #include "system/time.h" #include "system/filesys.h" #include "system/wait.h" #include "tdb.h" static int do_command(void); const char *cmdname; char *arg1, *arg2; size_t arg1len, arg2len; int bIterate = 0; char *line; TDB_DATA iterate_kbuf; char cmdline[1024]; static int disable_mmap; static int _disable_lock; enum commands { CMD_CREATE_TDB, CMD_OPEN_TDB, CMD_TRANSACTION_START, CMD_TRANSACTION_COMMIT, CMD_TRANSACTION_CANCEL, CMD_ERASE, CMD_DUMP, CMD_INSERT, CMD_MOVE, CMD_STOREHEX, CMD_STORE, CMD_SHOW, CMD_KEYS, CMD_HEXKEYS, CMD_DELETE, CMD_LIST_HASH_FREE, CMD_LIST_FREE, CMD_FREELIST_SIZE, CMD_INFO, CMD_MMAP, CMD_SPEED, CMD_FIRST, CMD_NEXT, CMD_SYSTEM, CMD_CHECK, CMD_REPACK, CMD_QUIT, CMD_HELP }; typedef struct { const char *name; enum commands cmd; } COMMAND_TABLE; COMMAND_TABLE cmd_table[] = { {"create", CMD_CREATE_TDB}, {"open", CMD_OPEN_TDB}, {"transaction_start", CMD_TRANSACTION_START}, {"transaction_commit", CMD_TRANSACTION_COMMIT}, {"transaction_cancel", CMD_TRANSACTION_CANCEL}, {"erase", CMD_ERASE}, {"dump", CMD_DUMP}, {"insert", CMD_INSERT}, {"move", CMD_MOVE}, {"storehex", CMD_STOREHEX}, {"store", CMD_STORE}, {"show", CMD_SHOW}, {"keys", CMD_KEYS}, {"hexkeys", CMD_HEXKEYS}, {"delete", CMD_DELETE}, {"list", CMD_LIST_HASH_FREE}, {"free", CMD_LIST_FREE}, {"freelist_size", CMD_FREELIST_SIZE}, {"info", CMD_INFO}, {"speed", CMD_SPEED}, {"mmap", CMD_MMAP}, {"first", CMD_FIRST}, {"1", CMD_FIRST}, {"next", CMD_NEXT}, {"n", CMD_NEXT}, {"check", CMD_CHECK}, {"quit", CMD_QUIT}, {"q", CMD_QUIT}, {"!", CMD_SYSTEM}, {"repack", CMD_REPACK}, {NULL, CMD_HELP} }; struct timeval tp1,tp2; static void _start_timer(void) { gettimeofday(&tp1,NULL); } static double _end_timer(void) { gettimeofday(&tp2,NULL); return((tp2.tv_sec - tp1.tv_sec) + (tp2.tv_usec - tp1.tv_usec)*1.0e-6); } #ifdef PRINTF_ATTRIBUTE static void tdb_log_open(struct tdb_context *tdb, enum tdb_debug_level level, const char *format, ...) PRINTF_ATTRIBUTE(3,4); #endif static void tdb_log_open(struct tdb_context *tdb, enum tdb_debug_level level, const char *format, ...) { const char *mutex_msg = "Can use mutexes only with MUTEX_LOCKING or NOLOCK\n"; char *p; va_list ap; p = strstr(format, mutex_msg); if (p != NULL) { /* * Yes, this is a hack, but we don't want to see this * message on first open, but we want to see * everything else. 
*/ return; } va_start(ap, format); vfprintf(stderr, format, ap); va_end(ap); } #ifdef PRINTF_ATTRIBUTE static void tdb_log(struct tdb_context *tdb, enum tdb_debug_level level, const char *format, ...) PRINTF_ATTRIBUTE(3,4); #endif static void tdb_log(struct tdb_context *tdb, enum tdb_debug_level level, const char *format, ...) { va_list ap; va_start(ap, format); vfprintf(stderr, format, ap); va_end(ap); } /* a tdb tool for manipulating a tdb database */ static TDB_CONTEXT *tdb; static int print_rec(TDB_CONTEXT *the_tdb, TDB_DATA key, TDB_DATA dbuf, void *state); static int print_key(TDB_CONTEXT *the_tdb, TDB_DATA key, TDB_DATA dbuf, void *state); static int print_hexkey(TDB_CONTEXT *the_tdb, TDB_DATA key, TDB_DATA dbuf, void *state); static void print_asc(const char *buf,int len) { int i; /* We're probably printing ASCII strings so don't try to display the trailing NULL character. */ if (buf[len - 1] == 0) len--; for (i=0;i8) printf(" "); while (n--) printf(" "); n = i%16; if (n > 8) n = 8; print_asc(&buf[i-(i%16)],n); printf(" "); n = (i%16) - n; if (n>0) print_asc(&buf[i-n],n); printf("\n"); } } static void help(void) { printf("\n" "tdbtool: \n" " create dbname : create a database\n" " open dbname : open an existing database\n" " transaction_start : start a transaction\n" " transaction_commit : commit a transaction\n" " transaction_cancel : cancel a transaction\n" " erase : erase the database\n" " dump : dump the database as strings\n" " keys : dump the database keys as strings\n" " hexkeys : dump the database keys as hex values\n" " info : print summary info about the database\n" " insert key data : insert a record\n" " move key file : move a record to a destination tdb\n" " storehex key data : store a record (replace), key/value in hex format\n" " store key data : store a record (replace)\n" " show key : show a record by key\n" " delete key : delete a record by key\n" " list : print the database hash table and freelist\n" " free : print the database freelist\n" " freelist_size : print the number of records in the freelist\n" " check : check the integrity of an opened database\n" " repack : repack the database\n" " speed : perform speed tests on the database\n" " ! command : execute system command\n" " 1 | first : print the first record\n" " n | next : print the next record\n" " q | quit : terminate\n" " \\n : repeat 'next' command\n" "\n"); } static void terror(const char *why) { printf("%s\n", why); } static void create_tdb(const char *tdbname) { struct tdb_logging_context log_ctx = { NULL, NULL}; log_ctx.log_fn = tdb_log; if (tdb) tdb_close(tdb); tdb = tdb_open_ex(tdbname, 0, TDB_CLEAR_IF_FIRST | (disable_mmap?TDB_NOMMAP:0) | (_disable_lock?TDB_NOLOCK:0), O_RDWR | O_CREAT | O_TRUNC, 0600, &log_ctx, NULL); if (!tdb) { printf("Could not create %s: %s\n", tdbname, strerror(errno)); } } static void open_tdb(const char *tdbname) { struct tdb_logging_context log_ctx = { NULL, NULL }; log_ctx.log_fn = tdb_log_open; if (tdb) tdb_close(tdb); tdb = tdb_open_ex(tdbname, 0, (disable_mmap?TDB_NOMMAP:0) | (_disable_lock?TDB_NOLOCK:0), O_RDWR, 0600, &log_ctx, NULL); log_ctx.log_fn = tdb_log; if (tdb != NULL) { tdb_set_logging_function(tdb, &log_ctx); } if ((tdb == NULL) && (errno == EINVAL)) { /* * Retry NOLOCK and readonly. There we want to see all * error messages. 
*/ tdb = tdb_open_ex(tdbname, 0, (disable_mmap?TDB_NOMMAP:0) |TDB_NOLOCK, O_RDONLY, 0600, &log_ctx, NULL); } if (!tdb) { printf("Could not open %s: %s\n", tdbname, strerror(errno)); } } static void insert_tdb(char *keyname, size_t keylen, char* data, size_t datalen) { TDB_DATA key, dbuf; if ((keyname == NULL) || (keylen == 0)) { terror("need key"); return; } key.dptr = (unsigned char *)keyname; key.dsize = keylen; dbuf.dptr = (unsigned char *)data; dbuf.dsize = datalen; if (tdb_store(tdb, key, dbuf, TDB_INSERT) != 0) { terror("insert failed"); } } static void store_tdb(char *keyname, size_t keylen, char* data, size_t datalen) { TDB_DATA key, dbuf; if ((keyname == NULL) || (keylen == 0)) { terror("need key"); return; } if ((data == NULL) || (datalen == 0)) { terror("need data"); return; } key.dptr = (unsigned char *)keyname; key.dsize = keylen; dbuf.dptr = (unsigned char *)data; dbuf.dsize = datalen; printf("Storing key:\n"); print_rec(tdb, key, dbuf, NULL); if (tdb_store(tdb, key, dbuf, TDB_REPLACE) != 0) { terror("store failed"); } } static bool hexchar(char c, uint8_t *v) { if ((c >= '0') && (c <= '9')) { *v = (c - '0'); return true; } if ((c >= 'A') && (c <= 'F')) { *v = (c - 'A' + 10); return true; } if ((c >= 'a') && (c <= 'f')) { *v = (c - 'a' + 10); return true; } return false; } static bool parse_hex(const char *src, size_t srclen, uint8_t *dst) { size_t i=0; if ((srclen % 2) != 0) { return false; } while (iname) { cmd_len = strlen(ctp->name); if (strncmp(ctp->name,cmdname,cmd_len) == 0) { mycmd = ctp->cmd; break; } ctp++; } } } switch (mycmd) { case CMD_CREATE_TDB: bIterate = 0; create_tdb(arg1); return 0; case CMD_OPEN_TDB: bIterate = 0; open_tdb(arg1); return 0; case CMD_SYSTEM: /* Shell command */ if (system(arg1) == -1) { terror("system() call failed\n"); } return 0; case CMD_QUIT: return 1; default: /* all the rest require a open database */ if (!tdb) { bIterate = 0; terror("database not open"); help(); return 0; } switch (mycmd) { case CMD_TRANSACTION_START: bIterate = 0; tdb_transaction_start(tdb); return 0; case CMD_TRANSACTION_COMMIT: bIterate = 0; tdb_transaction_commit(tdb); return 0; case CMD_REPACK: bIterate = 0; tdb_repack(tdb); return 0; case CMD_TRANSACTION_CANCEL: bIterate = 0; tdb_transaction_cancel(tdb); return 0; case CMD_ERASE: bIterate = 0; tdb_wipe_all(tdb); return 0; case CMD_DUMP: bIterate = 0; tdb_traverse(tdb, print_rec, NULL); return 0; case CMD_INSERT: bIterate = 0; insert_tdb(arg1, arg1len,arg2,arg2len); return 0; case CMD_MOVE: bIterate = 0; move_rec(arg1,arg1len,arg2); return 0; case CMD_STORE: bIterate = 0; store_tdb(arg1,arg1len,arg2,arg2len); return 0; case CMD_STOREHEX: bIterate = 0; store_hex_tdb(arg1,arg1len,arg2,arg2len); return 0; case CMD_SHOW: bIterate = 0; show_tdb(arg1, arg1len); return 0; case CMD_KEYS: tdb_traverse(tdb, print_key, NULL); return 0; case CMD_HEXKEYS: tdb_traverse(tdb, print_hexkey, NULL); return 0; case CMD_DELETE: bIterate = 0; delete_tdb(arg1,arg1len); return 0; case CMD_LIST_HASH_FREE: tdb_dump_all(tdb); return 0; case CMD_LIST_FREE: tdb_printfreelist(tdb); return 0; case CMD_FREELIST_SIZE: { int size; size = tdb_freelist_size(tdb); if (size < 0) { printf("Error getting freelist size.\n"); } else { printf("freelist size: %d\n", size); } return 0; } case CMD_INFO: info_tdb(); return 0; case CMD_SPEED: speed_tdb(arg1); return 0; case CMD_MMAP: toggle_mmap(); return 0; case CMD_FIRST: bIterate = 1; first_record(tdb, &iterate_kbuf); return 0; case CMD_NEXT: if (bIterate) next_record(tdb, &iterate_kbuf); return 0; case 
CMD_CHECK: check_db(tdb); return 0; case CMD_HELP: help(); return 0; case CMD_CREATE_TDB: case CMD_OPEN_TDB: case CMD_SYSTEM: case CMD_QUIT: /* * unhandled commands. cases included here to avoid compiler * warnings. */ return 0; } } return 0; } static char *tdb_convert_string(char *instring, size_t *sizep) { size_t length = 0; char *outp, *inp; char temp[3]; outp = inp = instring; while (*inp) { if (*inp == '\\') { inp++; if (*inp && strchr("0123456789abcdefABCDEF",(int)*inp)) { temp[0] = *inp++; temp[1] = '\0'; if (*inp && strchr("0123456789abcdefABCDEF",(int)*inp)) { temp[1] = *inp++; temp[2] = '\0'; } *outp++ = (char)strtol((const char *)temp,NULL,16); } else { *outp++ = *inp++; } } else { *outp++ = *inp++; } length++; } *sizep = length; return instring; } int main(int argc, char *argv[]) { cmdname = ""; arg1 = NULL; arg1len = 0; arg2 = NULL; arg2len = 0; if (argv[1] && (strcmp(argv[1], "-l") == 0)) { _disable_lock = 1; argv[1] = argv[0]; argv += 1; argc -= 1; } if (argv[1]) { cmdname = "open"; arg1 = argv[1]; do_command(); cmdname = ""; arg1 = NULL; } switch (argc) { case 1: case 2: /* Interactive mode */ while ((cmdname = tdb_getline("tdb> "))) { arg2 = arg1 = NULL; if ((arg1 = strchr((const char *)cmdname,' ')) != NULL) { arg1++; arg2 = arg1; while (*arg2) { if (*arg2 == ' ') { *arg2++ = '\0'; break; } if ((*arg2++ == '\\') && (*arg2 == ' ')) { arg2++; } } } if (arg1) arg1 = tdb_convert_string(arg1,&arg1len); if (arg2) arg2 = tdb_convert_string(arg2,&arg2len); if (do_command()) break; } break; case 5: arg2 = tdb_convert_string(argv[4],&arg2len); FALL_THROUGH; case 4: arg1 = tdb_convert_string(argv[3],&arg1len); FALL_THROUGH; case 3: cmdname = argv[2]; FALL_THROUGH; default: do_command(); break; } if (tdb) tdb_close(tdb); return 0; } tdb-1.4.2/tools/tdbtorture.c0000660000000000000000000002367413444661620015747 0ustar rootroot00000000000000/* this tests tdb by doing lots of ops from several simultaneous writers - that stresses the locking code. */ #include "replace.h" #include "system/time.h" #include "system/wait.h" #include "system/filesys.h" #include "tdb.h" #ifdef HAVE_GETOPT_H #include #endif #define REOPEN_PROB 30 #define DELETE_PROB 8 #define STORE_PROB 4 #define APPEND_PROB 6 #define TRANSACTION_PROB 10 #define TRANSACTION_PREPARE_PROB 2 #define LOCKSTORE_PROB 5 #define TRAVERSE_PROB 20 #define TRAVERSE_READ_PROB 20 #define CULL_PROB 100 #define KEYLEN 3 #define DATALEN 100 static struct tdb_context *db; static int in_transaction; static int error_count; static int always_transaction = 0; static int hash_size = 2; static unsigned loopnum; static int count_pipe; static bool mutex = false; static struct tdb_logging_context log_ctx; #ifdef PRINTF_ATTRIBUTE static void tdb_log(struct tdb_context *tdb, enum tdb_debug_level level, const char *format, ...) PRINTF_ATTRIBUTE(3,4); #endif static void tdb_log(struct tdb_context *tdb, enum tdb_debug_level level, const char *format, ...) { va_list ap; /* trace level messages do not indicate an error */ if (level != TDB_DEBUG_TRACE) { error_count++; } va_start(ap, format); vfprintf(stdout, format, ap); va_end(ap); fflush(stdout); #if 0 if (level != TDB_DEBUG_TRACE) { char *ptr; signal(SIGUSR1, SIG_IGN); asprintf(&ptr,"xterm -e gdb /proc/%d/exe %d", getpid(), getpid()); system(ptr); free(ptr); } #endif } static void fatal(const char *why) { perror(why); error_count++; } static char *randbuf(int len) { char *buf; int i; buf = (char *)malloc(len+1); for (i=0;i ldb

tdb

TDB is a Trivial Database. In concept, it is very much like GDBM and BSD's DB, except that it allows multiple simultaneous writers and uses locking internally to keep writers from trampling on each other. TDB is also extremely small.
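
For a quick sense of the C API, here is a minimal sketch of storing and fetching one record (the file name is illustrative and error handling is abbreviated):

  #include <stdio.h>
  #include <stdlib.h>
  #include <fcntl.h>
  #include "tdb.h"

  int main(void)
  {
      struct tdb_context *tdb;
      TDB_DATA key, val, out;

      /* hash_size 0 selects the default; 0600 is the file mode */
      tdb = tdb_open("demo.tdb", 0, 0, O_RDWR | O_CREAT, 0600);
      if (tdb == NULL)
          return 1;

      key.dptr = (unsigned char *)"hello";
      key.dsize = 5;
      val.dptr = (unsigned char *)"world";
      val.dsize = 5;

      if (tdb_store(tdb, key, val, TDB_REPLACE) != 0) {
          tdb_close(tdb);
          return 1;
      }

      /* tdb_fetch() returns malloc()ed memory; the caller frees it */
      out = tdb_fetch(tdb, key);
      if (out.dptr != NULL) {
          printf("%.*s\n", (int)out.dsize, (char *)out.dptr);
          free(out.dptr);
      }

      return tdb_close(tdb);
  }

Note that tdb_fetch() hands ownership of the returned buffer to the caller, which is why out.dptr is freed above.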

Download

You can download the latest releases of tdb from the tdb directory on the Samba public source archive.

Discussion and bug reports

tdb does not currently have its own mailing list or bug tracking system. For now, please use the samba-technical mailing list, and the Samba bugzilla bug tracking system.

Download

You can download the latest code either via git or rsync.

To fetch via git, see the following guide:
Using Git for Samba Development
Once you have cloned the tree, switch to the master branch and cd into the source/lib/tdb directory.

To fetch via rsync, use these commands:
  rsync -Pavz samba.org::ftp/unpacked/standalone_projects/lib/tdb .
  rsync -Pavz samba.org::ftp/unpacked/standalone_projects/lib/replace .
and build in tdb. It will find the replace library in the directory above automatically. tdb-1.4.2/wscript0000660000000000000000000002057113527011454013650 0ustar rootroot00000000000000#!/usr/bin/env python APPNAME = 'tdb' VERSION = '1.4.2' import sys, os # find the buildtools directory top = '.' while not os.path.exists(top+'/buildtools') and len(top.split('/')) < 5: top = top + '/..' sys.path.insert(0, top + '/buildtools/wafsamba') out = 'bin' import wafsamba from wafsamba import samba_dist, samba_utils from waflib import Options, Logs, Context import shutil samba_dist.DIST_DIRS('lib/tdb:. lib/replace:lib/replace buildtools:buildtools third_party/waf:third_party/waf') tdb1_unit_tests = [ 'run-3G-file', 'run-bad-tdb-header', 'run', 'run-check', 'run-corrupt', 'run-die-during-transaction', 'run-endian', 'run-incompatible', 'run-nested-transactions', 'run-nested-traverse', 'run-no-lock-during-traverse', 'run-oldhash', 'run-open-during-transaction', 'run-readonly-check', 'run-rescue', 'run-rescue-find_entry', 'run-rdlock-upgrade', 'run-rwlock-check', 'run-summary', 'run-transaction-expand', 'run-traverse-in-transaction', 'run-wronghash-fail', 'run-zero-append', 'run-fcntl-deadlock', 'run-marklock-deadlock', 'run-allrecord-traverse-deadlock', 'run-mutex-openflags2', 'run-mutex-trylock', 'run-mutex-allrecord-bench', 'run-mutex-allrecord-trylock', 'run-mutex-allrecord-block', 'run-mutex-transaction1', 'run-mutex-die', 'run-mutex1', 'run-circular-chain', 'run-circular-freelist', 'run-traverse-chain', ] def options(opt): opt.BUILTIN_DEFAULT('replace') opt.PRIVATE_EXTENSION_DEFAULT('tdb', noextension='tdb') opt.RECURSE('lib/replace') opt.add_option('--disable-tdb-mutex-locking', help=("Disable the use of pthread robust mutexes"), action="store_true", dest='disable_tdb_mutex_locking', default=False) def configure(conf): conf.env.disable_tdb_mutex_locking = getattr(Options.options, 'disable_tdb_mutex_locking', False) if not conf.env.disable_tdb_mutex_locking: conf.env.replace_add_global_pthread = True conf.RECURSE('lib/replace') conf.env.standalone_tdb = conf.IN_LAUNCH_DIR() conf.env.building_tdb = True if not conf.env.standalone_tdb: if conf.CHECK_BUNDLED_SYSTEM_PKG('tdb', minversion=VERSION, implied_deps='replace'): conf.define('USING_SYSTEM_TDB', 1) conf.env.building_tdb = False if not conf.env.disable_python and \ conf.CHECK_BUNDLED_SYSTEM_PYTHON('pytdb', 'tdb', minversion=VERSION): conf.define('USING_SYSTEM_PYTDB', 1) if (conf.CONFIG_SET('HAVE_ROBUST_MUTEXES') and conf.env.building_tdb and not conf.env.disable_tdb_mutex_locking): conf.define('USE_TDB_MUTEX_LOCKING', 1) conf.CHECK_XSLTPROC_MANPAGES() conf.SAMBA_CHECK_PYTHON() conf.SAMBA_CHECK_PYTHON_HEADERS() conf.SAMBA_CONFIG_H() conf.SAMBA_CHECK_UNDEFINED_SYMBOL_FLAGS() def build(bld): bld.RECURSE('lib/replace') COMMON_FILES='''check.c error.c tdb.c traverse.c freelistcheck.c lock.c dump.c freelist.c io.c open.c transaction.c hash.c summary.c rescue.c mutex.c''' COMMON_SRC = bld.SUBDIR('common', COMMON_FILES) if bld.env.standalone_tdb: bld.env.PKGCONFIGDIR = '${LIBDIR}/pkgconfig' private_library = False else: private_library = True if not bld.CONFIG_SET('USING_SYSTEM_TDB'): tdb_deps = 'replace' if bld.CONFIG_SET('USE_TDB_MUTEX_LOCKING'): tdb_deps += ' pthread' bld.SAMBA_LIBRARY('tdb', COMMON_SRC, deps=tdb_deps, includes='include', abi_directory='ABI', abi_match='tdb_*', hide_symbols=True, vnum=VERSION, public_headers=('' if private_library else 'include/tdb.h'), public_headers_install=not private_library, pc_files='tdb.pc', 
private_library=private_library) bld.SAMBA_BINARY('tdbtorture', 'tools/tdbtorture.c', 'tdb', install=False) bld.SAMBA_BINARY('tdbrestore', 'tools/tdbrestore.c', 'tdb', manpages='man/tdbrestore.8') bld.SAMBA_BINARY('tdbdump', 'tools/tdbdump.c', 'tdb', manpages='man/tdbdump.8') bld.SAMBA_BINARY('tdbbackup', 'tools/tdbbackup.c', 'tdb', manpages='man/tdbbackup.8') bld.SAMBA_BINARY('tdbtool', 'tools/tdbtool.c', 'tdb', manpages='man/tdbtool.8') if bld.env.standalone_tdb: # FIXME: This hardcoded list is stupid, stupid, stupid. bld.SAMBA_SUBSYSTEM('tdb-test-helpers', 'test/external-agent.c test/lock-tracking.c test/logging.c', tdb_deps, includes='include') for t in tdb1_unit_tests: b = "tdb1-" + t s = "test/" + t + ".c" bld.SAMBA_BINARY(b, s, 'replace tdb-test-helpers', includes='include', install=False) if not bld.CONFIG_SET('USING_SYSTEM_PYTDB'): bld.SAMBA_PYTHON('pytdb', 'pytdb.c', deps='tdb', enabled=not bld.env.disable_python, realname='tdb.so', cflags='-DPACKAGE_VERSION=\"%s\"' % VERSION) if not bld.env.disable_python: bld.SAMBA_SCRIPT('_tdb_text.py', pattern='_tdb_text.py', installdir='python') bld.INSTALL_FILES('${PYTHONARCHDIR}', '_tdb_text.py') def testonly(ctx): '''run tdb testsuite''' ecode = 0 blddir = Context.g_module.out test_prefix = "%s/st" % (blddir) shutil.rmtree(test_prefix, ignore_errors=True) os.makedirs(test_prefix) os.environ['TEST_DATA_PREFIX'] = test_prefix env = samba_utils.LOAD_ENVIRONMENT() # FIXME: This is horrible :( if env.building_tdb: # Create scratch directory for tests. testdir = os.path.join(test_prefix, 'tdb-tests') samba_utils.mkdir_p(testdir) # Symlink back to source dir so it can find tests in test/ link = os.path.join(testdir, 'test') if not os.path.exists(link): os.symlink(ctx.path.make_node('test').abspath(), link) sh_tests = ["test/test_tdbbackup.sh test/jenkins-be-hash.tdb"] for sh_test in sh_tests: cmd = "BINDIR=%s %s" % (blddir, sh_test) print("shell test: " + cmd) ret = samba_utils.RUN_COMMAND(cmd) if ret != 0: print("%s sh test failed" % cmd) ecode = ret break for t in tdb1_unit_tests: f = "tdb1-" + t cmd = "cd " + testdir + " && " + os.path.abspath(os.path.join(blddir, f)) + " > test-output 2>&1" print("..." + f) ret = samba_utils.RUN_COMMAND(cmd) if ret != 0: print("%s failed:" % f) samba_utils.RUN_COMMAND("cat " + os.path.join(testdir, 'test-output')) ecode = ret break if ecode == 0: cmd = os.path.join(blddir, 'tdbtorture') ret = samba_utils.RUN_COMMAND(cmd) print("testsuite returned %d" % ret) if ret != 0: ecode = ret pyret = samba_utils.RUN_PYTHON_TESTS(['python/tests/simple.py']) print("python testsuite returned %d" % pyret) sys.exit(ecode or pyret) # WAF doesn't build the unit tests for this, maybe because they don't link with tdb? 
# This forces it def test(ctx): Options.commands.append('build') Options.commands.append('testonly') def dist(): '''makes a tarball for distribution''' samba_dist.dist() def reconfigure(ctx): '''reconfigure if config scripts have changed''' samba_utils.reconfigure(ctx) tdb-1.4.2/lib/replace/.checker_innocent0000660000000000000000000000024612406075657017724 0ustar rootroot00000000000000>>>MISTAKE21_create_files_6a9e68ada99a97cb >>>MISTAKE21_os2_delete_9b2bfa7f38711d09 >>>MISTAKE21_os2_delete_2fcc29aaa99a97cb >>>SECURITY2_os2_delete_9b2bfa7f1c9396ca tdb-1.4.2/lib/replace/Makefile0000660000000000000000000000153413444661620016055 0ustar rootroot00000000000000# simple makefile wrapper to run waf WAF_BINARY=$(PYTHON) ../../buildtools/bin/waf WAF=PYTHONHASHSEED=1 WAF_MAKE=1 $(WAF_BINARY) all: $(WAF) build install: $(WAF) install uninstall: $(WAF) uninstall test: $(WAF) test $(TEST_OPTIONS) testenv: $(WAF) test --testenv $(TEST_OPTIONS) quicktest: $(WAF) test --quick $(TEST_OPTIONS) dist: touch .tmplock WAFLOCK=.tmplock $(WAF) dist distcheck: touch .tmplock WAFLOCK=.tmplock $(WAF) distcheck clean: $(WAF) clean distclean: $(WAF) distclean reconfigure: configure $(WAF) reconfigure show_waf_options: $(WAF) --help # some compatibility make targets everything: all testsuite: all check: test torture: all # this should do an install as well, once install is finished installcheck: test etags: $(WAF) etags ctags: $(WAF) ctags bin/%:: FORCE $(WAF) --targets=`basename $@` FORCE: tdb-1.4.2/lib/replace/README0000660000000000000000000000305113444661620015271 0ustar rootroot00000000000000This subsystem ensures that we can always use a certain core set of functions and types, that are either provided by the OS or by replacement functions / definitions in this subsystem. The aim is to try to stick to POSIX functions in here as much as possible. Convenience functions that are available on no platform at all belong in other subsystems (such as LIBUTIL). The following functions are guaranteed: ftruncate strlcpy strlcat mktime rename initgroups memmove strdup setlinebuf vsyslog timegm setenv unsetenv strndup strnlen waitpid seteuid setegid asprintf snprintf vasprintf vsnprintf opendir readdir telldir seekdir clock_gettime closedir dlopen dlclose dlsym dlerror chroot bzero strerror errno mkdtemp mkstemp (a secure one!) pread pwrite chown lchown readline (the library) inet_ntoa inet_ntop inet_pton inet_aton strtoll strtoull socketpair strptime getaddrinfo freeaddrinfo getnameinfo gai_strerror getifaddrs freeifaddrs utime utimes dup2 link readlink symlink realpath poll setproctitle memset_s Types: bool socklen_t uint{8,16,32,64}_t int{8,16,32,64}_t intptr_t sig_atomic_t blksize_t blkcnt_t Constants: PATH_NAME_MAX UINT{16,32,64}_MAX INT32_MAX RTLD_LAZY HOST_NAME_MAX UINT16_MAX UINT32_MAX UINT64_MAX CHAR_BIT Macros: va_copy __FUNCTION__ __FILE__ __LINE__ __LINESTR__ __location__ __STRING __STRINGSTRING MIN MAX QSORT_CAST ZERO_STRUCT ZERO_STRUCTP ZERO_STRUCTPN ZERO_ARRAY ARRAY_SIZE PTR_DIFF Headers: stdint.h stdbool.h Optional C keywords: volatile Prerequisites: memset (for bzero) syslog (for vsyslog) mktemp (for mkstemp and mkdtemp) tdb-1.4.2/lib/replace/closefrom.c0000660000000000000000000000515712746330636016563 0ustar rootroot00000000000000/* * Unix SMB/CIFS implementation. * Samba utility functions * Copyright (C) Volker Lendecke 2016 * * ** NOTE! The following LGPL license applies to the replace * ** library. 
This does NOT imply that all of Samba is released * ** under the LGPL * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . */ #include "replace.h" #include #include #include static int closefrom_sysconf(int lower) { long max_files, fd; max_files = sysconf(_SC_OPEN_MAX); if (max_files == -1) { max_files = 65536; } for (fd=lower; fdd_name, &endptr, 10); if ((fd == 0) && (errno == EINVAL)) { continue; } if ((fd == ULLONG_MAX) && (errno == ERANGE)) { continue; } if (*endptr != '\0') { continue; } if (fd == dir_fd) { continue; } if (fd > INT_MAX) { continue; } if (fd < lower) { continue; } if (num_fds >= (fd_array_size / sizeof(int))) { void *tmp; if (fd_array_size == 0) { fd_array_size = 16 * sizeof(int); } else { if (fd_array_size + fd_array_size < fd_array_size) { /* overflow */ goto fail; } fd_array_size = fd_array_size + fd_array_size; } tmp = realloc(fds, fd_array_size); if (tmp == NULL) { goto fail; } fds = tmp; } fds[num_fds++] = fd; } for (i=0; i that this crypt routine may sometimes get the wrong answer. Only use UFC_CRYT if you really need it. */ #include "replace.h" #ifndef HAVE_CRYPT /* * UFC-crypt: ultra fast crypt(3) implementation * * Copyright (C) 1991-1998, Free Software Foundation, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . * * @(#)crypt_util.c 2.31 02/08/92 * * Support routines * */ #ifndef long32 #define long32 int32_t #endif #ifndef long64 #define long64 int64_t #endif #ifndef ufc_long #define ufc_long unsigned #endif #ifndef _UFC_64_ #define _UFC_32_ #endif /* * Permutation done once on the 56 bit * key derived from the original 8 byte ASCII key. */ static int pc1[56] = { 57, 49, 41, 33, 25, 17, 9, 1, 58, 50, 42, 34, 26, 18, 10, 2, 59, 51, 43, 35, 27, 19, 11, 3, 60, 52, 44, 36, 63, 55, 47, 39, 31, 23, 15, 7, 62, 54, 46, 38, 30, 22, 14, 6, 61, 53, 45, 37, 29, 21, 13, 5, 28, 20, 12, 4 }; /* * How much to rotate each 28 bit half of the pc1 permutated * 56 bit key before using pc2 to give the i' key */ static int rots[16] = { 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1 }; /* * Permutation giving the key * of the i' DES round */ static int pc2[48] = { 14, 17, 11, 24, 1, 5, 3, 28, 15, 6, 21, 10, 23, 19, 12, 4, 26, 8, 16, 7, 27, 20, 13, 2, 41, 52, 31, 37, 47, 55, 30, 40, 51, 45, 33, 48, 44, 49, 39, 56, 34, 53, 46, 42, 50, 36, 29, 32 }; /* * The E expansion table which selects * bits from the 32 bit intermediate result. 
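* It expands the 32 bit half block to 48 bits: the table has 48
* entries, and each 4 bit group also picks up its two neighbouring
* edge bits, so those source bits appear twice.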
*/ static int esel[48] = { 32, 1, 2, 3, 4, 5, 4, 5, 6, 7, 8, 9, 8, 9, 10, 11, 12, 13, 12, 13, 14, 15, 16, 17, 16, 17, 18, 19, 20, 21, 20, 21, 22, 23, 24, 25, 24, 25, 26, 27, 28, 29, 28, 29, 30, 31, 32, 1 }; static int e_inverse[64]; /* * Permutation done on the * result of sbox lookups */ static int perm32[32] = { 16, 7, 20, 21, 29, 12, 28, 17, 1, 15, 23, 26, 5, 18, 31, 10, 2, 8, 24, 14, 32, 27, 3, 9, 19, 13, 30, 6, 22, 11, 4, 25 }; /* * The sboxes */ static int sbox[8][4][16]= { { { 14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7 }, { 0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8 }, { 4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0 }, { 15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13 } }, { { 15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10 }, { 3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5 }, { 0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15 }, { 13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9 } }, { { 10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8 }, { 13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1 }, { 13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7 }, { 1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12 } }, { { 7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15 }, { 13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9 }, { 10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4 }, { 3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14 } }, { { 2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9 }, { 14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6 }, { 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14 }, { 11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3 } }, { { 12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11 }, { 10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8 }, { 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6 }, { 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13 } }, { { 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1 }, { 13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6 }, { 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2 }, { 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12 } }, { { 13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7 }, { 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2 }, { 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8 }, { 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11 } } }; /* * This is the final * permutation matrix */ static int final_perm[64] = { 40, 8, 48, 16, 56, 24, 64, 32, 39, 7, 47, 15, 55, 23, 63, 31, 38, 6, 46, 14, 54, 22, 62, 30, 37, 5, 45, 13, 53, 21, 61, 29, 36, 4, 44, 12, 52, 20, 60, 28, 35, 3, 43, 11, 51, 19, 59, 27, 34, 2, 42, 10, 50, 18, 58, 26, 33, 1, 41, 9, 49, 17, 57, 25 }; /* * The 16 DES keys in BITMASK format */ #ifdef _UFC_32_ long32 _ufc_keytab[16][2]; #endif #ifdef _UFC_64_ long64 _ufc_keytab[16]; #endif #define ascii_to_bin(c) ((c)>='a'?(c-59):(c)>='A'?((c)-53):(c)-'.') #define bin_to_ascii(c) ((c)>=38?((c)-38+'a'):(c)>=12?((c)-12+'A'):(c)+'.') /* Macro to set a bit (0..23) */ #define BITMASK(i) ( (1<<(11-(i)%12+3)) << ((i)<12?16:0) ) /* * sb arrays: * * Workhorses of the inner loop of the DES implementation. * They do sbox lookup, shifting of this value, 32 bit * permutation and E permutation for the next round. * * Kept in 'BITMASK' format. 
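* Each 12 bit segment of the 48 bit value indexes 4096 entries;
* the 32 bit build keeps two long32 words per entry, which is why
* the tables below are sized 8192 rather than 4096.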
*/ #ifdef _UFC_32_ long32 _ufc_sb0[8192], _ufc_sb1[8192], _ufc_sb2[8192], _ufc_sb3[8192]; static long32 *sb[4] = {_ufc_sb0, _ufc_sb1, _ufc_sb2, _ufc_sb3}; #endif #ifdef _UFC_64_ long64 _ufc_sb0[4096], _ufc_sb1[4096], _ufc_sb2[4096], _ufc_sb3[4096]; static long64 *sb[4] = {_ufc_sb0, _ufc_sb1, _ufc_sb2, _ufc_sb3}; #endif /* * eperm32tab: do 32 bit permutation and E selection * * The first index is the byte number in the 32 bit value to be permuted * - second - is the value of this byte * - third - selects the two 32 bit values * * The table is used and generated internally in init_des to speed it up */ static ufc_long eperm32tab[4][256][2]; /* * do_pc1: permform pc1 permutation in the key schedule generation. * * The first index is the byte number in the 8 byte ASCII key * - second - - the two 28 bits halfs of the result * - third - selects the 7 bits actually used of each byte * * The result is kept with 28 bit per 32 bit with the 4 most significant * bits zero. */ static ufc_long do_pc1[8][2][128]; /* * do_pc2: permform pc2 permutation in the key schedule generation. * * The first index is the septet number in the two 28 bit intermediate values * - second - - - septet values * * Knowledge of the structure of the pc2 permutation is used. * * The result is kept with 28 bit per 32 bit with the 4 most significant * bits zero. */ static ufc_long do_pc2[8][128]; /* * efp: undo an extra e selection and do final * permutation giving the DES result. * * Invoked 6 bit a time on two 48 bit values * giving two 32 bit longs. */ static ufc_long efp[16][64][2]; static unsigned char bytemask[8] = { 0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01 }; static ufc_long longmask[32] = { 0x80000000, 0x40000000, 0x20000000, 0x10000000, 0x08000000, 0x04000000, 0x02000000, 0x01000000, 0x00800000, 0x00400000, 0x00200000, 0x00100000, 0x00080000, 0x00040000, 0x00020000, 0x00010000, 0x00008000, 0x00004000, 0x00002000, 0x00001000, 0x00000800, 0x00000400, 0x00000200, 0x00000100, 0x00000080, 0x00000040, 0x00000020, 0x00000010, 0x00000008, 0x00000004, 0x00000002, 0x00000001 }; /* * Silly rewrite of 'bzero'. I do so * because some machines don't have * bzero and some don't have memset. */ static void clearmem(char *start, int cnt) { while(cnt--) *start++ = '\0'; } static int initialized = 0; /* lookup a 6 bit value in sbox */ #define s_lookup(i,s) sbox[(i)][(((s)>>4) & 0x2)|((s) & 0x1)][((s)>>1) & 0xf]; /* * Initialize unit - may be invoked directly * by fcrypt users. */ static void ufc_init_des(void) { int comes_from_bit; int bit, sg; ufc_long j; ufc_long mask1, mask2; /* * Create the do_pc1 table used * to affect pc1 permutation * when generating keys */ for(bit = 0; bit < 56; bit++) { comes_from_bit = pc1[bit] - 1; mask1 = bytemask[comes_from_bit % 8 + 1]; mask2 = longmask[bit % 28 + 4]; for(j = 0; j < 128; j++) { if(j & mask1) do_pc1[comes_from_bit / 8][bit / 28][j] |= mask2; } } /* * Create the do_pc2 table used * to affect pc2 permutation when * generating keys */ for(bit = 0; bit < 48; bit++) { comes_from_bit = pc2[bit] - 1; mask1 = bytemask[comes_from_bit % 7 + 1]; mask2 = BITMASK(bit % 24); for(j = 0; j < 128; j++) { if(j & mask1) do_pc2[comes_from_bit / 7][j] |= mask2; } } /* * Now generate the table used to do combined * 32 bit permutation and e expansion * * We use it because we have to permute 16384 32 bit * longs into 48 bit in order to initialize sb. * * Looping 48 rounds per permutation becomes * just too slow... 
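* so the permutation is precomputed per source byte here and then
* applied with four table lookups per 32 bit word.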
* */ clearmem((char*)eperm32tab, sizeof(eperm32tab)); for(bit = 0; bit < 48; bit++) { ufc_long inner_mask1,comes_from; comes_from = perm32[esel[bit]-1]-1; inner_mask1 = bytemask[comes_from % 8]; for(j = 256; j--;) { if(j & inner_mask1) eperm32tab[comes_from / 8][j][bit / 24] |= BITMASK(bit % 24); } } /* * Create the sb tables: * * For each 12 bit segment of an 48 bit intermediate * result, the sb table precomputes the two 4 bit * values of the sbox lookups done with the two 6 * bit halves, shifts them to their proper place, * sends them through perm32 and finally E expands * them so that they are ready for the next * DES round. * */ for(sg = 0; sg < 4; sg++) { int j1, j2; int s1, s2; for(j1 = 0; j1 < 64; j1++) { s1 = s_lookup(2 * sg, j1); for(j2 = 0; j2 < 64; j2++) { ufc_long to_permute, inx; s2 = s_lookup(2 * sg + 1, j2); to_permute = ((s1 << 4) | s2) << (24 - 8 * sg); #ifdef _UFC_32_ inx = ((j1 << 6) | j2) << 1; sb[sg][inx ] = eperm32tab[0][(to_permute >> 24) & 0xff][0]; sb[sg][inx+1] = eperm32tab[0][(to_permute >> 24) & 0xff][1]; sb[sg][inx ] |= eperm32tab[1][(to_permute >> 16) & 0xff][0]; sb[sg][inx+1] |= eperm32tab[1][(to_permute >> 16) & 0xff][1]; sb[sg][inx ] |= eperm32tab[2][(to_permute >> 8) & 0xff][0]; sb[sg][inx+1] |= eperm32tab[2][(to_permute >> 8) & 0xff][1]; sb[sg][inx ] |= eperm32tab[3][(to_permute) & 0xff][0]; sb[sg][inx+1] |= eperm32tab[3][(to_permute) & 0xff][1]; #endif #ifdef _UFC_64_ inx = ((j1 << 6) | j2); sb[sg][inx] = ((long64)eperm32tab[0][(to_permute >> 24) & 0xff][0] << 32) | (long64)eperm32tab[0][(to_permute >> 24) & 0xff][1]; sb[sg][inx] |= ((long64)eperm32tab[1][(to_permute >> 16) & 0xff][0] << 32) | (long64)eperm32tab[1][(to_permute >> 16) & 0xff][1]; sb[sg][inx] |= ((long64)eperm32tab[2][(to_permute >> 8) & 0xff][0] << 32) | (long64)eperm32tab[2][(to_permute >> 8) & 0xff][1]; sb[sg][inx] |= ((long64)eperm32tab[3][(to_permute) & 0xff][0] << 32) | (long64)eperm32tab[3][(to_permute) & 0xff][1]; #endif } } } /* * Create an inverse matrix for esel telling * where to plug out bits if undoing it */ for(bit=48; bit--;) { e_inverse[esel[bit] - 1 ] = bit; e_inverse[esel[bit] - 1 + 32] = bit + 48; } /* * create efp: the matrix used to * undo the E expansion and effect final permutation */ clearmem((char*)efp, sizeof efp); for(bit = 0; bit < 64; bit++) { int o_bit, o_long; ufc_long word_value, inner_mask1, inner_mask2; int comes_from_f_bit, comes_from_e_bit; int comes_from_word, bit_within_word; /* See where bit i belongs in the two 32 bit long's */ o_long = bit / 32; /* 0..1 */ o_bit = bit % 32; /* 0..31 */ /* * And find a bit in the e permutated value setting this bit. * * Note: the e selection may have selected the same bit several * times. By the initialization of e_inverse, we only look * for one specific instance. */ comes_from_f_bit = final_perm[bit] - 1; /* 0..63 */ comes_from_e_bit = e_inverse[comes_from_f_bit]; /* 0..95 */ comes_from_word = comes_from_e_bit / 6; /* 0..15 */ bit_within_word = comes_from_e_bit % 6; /* 0..5 */ inner_mask1 = longmask[bit_within_word + 26]; inner_mask2 = longmask[o_bit]; for(word_value = 64; word_value--;) { if(word_value & inner_mask1) efp[comes_from_word][word_value][o_long] |= inner_mask2; } } initialized++; } /* * Process the elements of the sb table permuting the * bits swapped in the expansion by the current salt. 
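* Bits selected by the salt are exchanged between the two halves
* of each entry with an XOR swap (x = a ^ b; a ^= x; b ^= x).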
*/ #ifdef _UFC_32_ static void shuffle_sb(long32 *k, ufc_long saltbits) { ufc_long j; long32 x; for(j=4096; j--;) { x = (k[0] ^ k[1]) & (long32)saltbits; *k++ ^= x; *k++ ^= x; } } #endif #ifdef _UFC_64_ static void shuffle_sb(long64 *k, ufc_long saltbits) { ufc_long j; long64 x; for(j=4096; j--;) { x = ((*k >> 32) ^ *k) & (long64)saltbits; *k++ ^= (x << 32) | x; } } #endif /* * Setup the unit for a new salt * Hopefully we'll not see a new salt in each crypt call. */ static unsigned char current_salt[3] = "&&"; /* invalid value */ static ufc_long current_saltbits = 0; static int direction = 0; static void setup_salt(const char *s1) { ufc_long i, j, saltbits; const unsigned char *s2 = (const unsigned char *)s1; if(!initialized) ufc_init_des(); if(s2[0] == current_salt[0] && s2[1] == current_salt[1]) return; current_salt[0] = s2[0]; current_salt[1] = s2[1]; /* * This is the only crypt change to DES: * entries are swapped in the expansion table * according to the bits set in the salt. */ saltbits = 0; for(i = 0; i < 2; i++) { long c=ascii_to_bin(s2[i]); if(c < 0 || c > 63) c = 0; for(j = 0; j < 6; j++) { if((c >> j) & 0x1) saltbits |= BITMASK(6 * i + j); } } /* * Permute the sb table values * to reflect the changed e * selection table */ shuffle_sb(_ufc_sb0, current_saltbits ^ saltbits); shuffle_sb(_ufc_sb1, current_saltbits ^ saltbits); shuffle_sb(_ufc_sb2, current_saltbits ^ saltbits); shuffle_sb(_ufc_sb3, current_saltbits ^ saltbits); current_saltbits = saltbits; } static void ufc_mk_keytab(char *key) { ufc_long v1, v2, *k1; int i; #ifdef _UFC_32_ long32 v, *k2 = &_ufc_keytab[0][0]; #endif #ifdef _UFC_64_ long64 v, *k2 = &_ufc_keytab[0]; #endif v1 = v2 = 0; k1 = &do_pc1[0][0][0]; for(i = 8; i--;) { v1 |= k1[*key & 0x7f]; k1 += 128; v2 |= k1[*key++ & 0x7f]; k1 += 128; } for(i = 0; i < 16; i++) { k1 = &do_pc2[0][0]; v1 = (v1 << rots[i]) | (v1 >> (28 - rots[i])); v = k1[(v1 >> 21) & 0x7f]; k1 += 128; v |= k1[(v1 >> 14) & 0x7f]; k1 += 128; v |= k1[(v1 >> 7) & 0x7f]; k1 += 128; v |= k1[(v1 ) & 0x7f]; k1 += 128; #ifdef _UFC_32_ *k2++ = v; v = 0; #endif #ifdef _UFC_64_ v <<= 32; #endif v2 = (v2 << rots[i]) | (v2 >> (28 - rots[i])); v |= k1[(v2 >> 21) & 0x7f]; k1 += 128; v |= k1[(v2 >> 14) & 0x7f]; k1 += 128; v |= k1[(v2 >> 7) & 0x7f]; k1 += 128; v |= k1[(v2 ) & 0x7f]; *k2++ = v; } direction = 0; } /* * Undo an extra E selection and do final permutations */ ufc_long *_ufc_dofinalperm(ufc_long l1, ufc_long l2, ufc_long r1, ufc_long r2) { ufc_long v1, v2, x; static ufc_long ary[2]; x = (l1 ^ l2) & current_saltbits; l1 ^= x; l2 ^= x; x = (r1 ^ r2) & current_saltbits; r1 ^= x; r2 ^= x; v1=v2=0; l1 >>= 3; l2 >>= 3; r1 >>= 3; r2 >>= 3; v1 |= efp[15][ r2 & 0x3f][0]; v2 |= efp[15][ r2 & 0x3f][1]; v1 |= efp[14][(r2 >>= 6) & 0x3f][0]; v2 |= efp[14][ r2 & 0x3f][1]; v1 |= efp[13][(r2 >>= 10) & 0x3f][0]; v2 |= efp[13][ r2 & 0x3f][1]; v1 |= efp[12][(r2 >>= 6) & 0x3f][0]; v2 |= efp[12][ r2 & 0x3f][1]; v1 |= efp[11][ r1 & 0x3f][0]; v2 |= efp[11][ r1 & 0x3f][1]; v1 |= efp[10][(r1 >>= 6) & 0x3f][0]; v2 |= efp[10][ r1 & 0x3f][1]; v1 |= efp[ 9][(r1 >>= 10) & 0x3f][0]; v2 |= efp[ 9][ r1 & 0x3f][1]; v1 |= efp[ 8][(r1 >>= 6) & 0x3f][0]; v2 |= efp[ 8][ r1 & 0x3f][1]; v1 |= efp[ 7][ l2 & 0x3f][0]; v2 |= efp[ 7][ l2 & 0x3f][1]; v1 |= efp[ 6][(l2 >>= 6) & 0x3f][0]; v2 |= efp[ 6][ l2 & 0x3f][1]; v1 |= efp[ 5][(l2 >>= 10) & 0x3f][0]; v2 |= efp[ 5][ l2 & 0x3f][1]; v1 |= efp[ 4][(l2 >>= 6) & 0x3f][0]; v2 |= efp[ 4][ l2 & 0x3f][1]; v1 |= efp[ 3][ l1 & 0x3f][0]; v2 |= efp[ 3][ l1 & 0x3f][1]; v1 |= efp[ 2][(l1 >>= 6) & 0x3f][0]; v2 
|= efp[ 2][ l1 & 0x3f][1]; v1 |= efp[ 1][(l1 >>= 10) & 0x3f][0]; v2 |= efp[ 1][ l1 & 0x3f][1]; v1 |= efp[ 0][(l1 >>= 6) & 0x3f][0]; v2 |= efp[ 0][ l1 & 0x3f][1]; ary[0] = v1; ary[1] = v2; return ary; } /* * crypt only: convert from 64 bit to 11 bit ASCII * prefixing with the salt */ static char *output_conversion(ufc_long v1, ufc_long v2, const char *salt) { static char outbuf[14]; int i, s; outbuf[0] = salt[0]; outbuf[1] = salt[1] ? salt[1] : salt[0]; for(i = 0; i < 5; i++) outbuf[i + 2] = bin_to_ascii((v1 >> (26 - 6 * i)) & 0x3f); s = (v2 & 0xf) << 2; v2 = (v2 >> 2) | ((v1 & 0x3) << 30); for(i = 5; i < 10; i++) outbuf[i + 2] = bin_to_ascii((v2 >> (56 - 6 * i)) & 0x3f); outbuf[12] = bin_to_ascii(s); outbuf[13] = 0; return outbuf; } /* * UNIX crypt function */ static ufc_long *_ufc_doit(ufc_long , ufc_long, ufc_long, ufc_long, ufc_long); char *ufc_crypt(const char *key,const char *salt) { ufc_long *s; char ktab[9]; /* * Hack DES tables according to salt */ setup_salt(salt); /* * Setup key schedule */ clearmem(ktab, sizeof ktab); strncpy(ktab, key, 8); ufc_mk_keytab(ktab); /* * Go for the 25 DES encryptions */ s = _ufc_doit((ufc_long)0, (ufc_long)0, (ufc_long)0, (ufc_long)0, (ufc_long)25); /* * And convert back to 6 bit ASCII */ return output_conversion(s[0], s[1], salt); } #ifdef _UFC_32_ /* * 32 bit version */ extern long32 _ufc_keytab[16][2]; extern long32 _ufc_sb0[], _ufc_sb1[], _ufc_sb2[], _ufc_sb3[]; #define SBA(sb, v) (*(long32*)((char*)(sb)+(v))) static ufc_long *_ufc_doit(ufc_long l1, ufc_long l2, ufc_long r1, ufc_long r2, ufc_long itr) { int i; long32 s, *k; while(itr--) { k = &_ufc_keytab[0][0]; for(i=8; i--; ) { s = *k++ ^ r1; l1 ^= SBA(_ufc_sb1, s & 0xffff); l2 ^= SBA(_ufc_sb1, (s & 0xffff)+4); l1 ^= SBA(_ufc_sb0, s >>= 16); l2 ^= SBA(_ufc_sb0, (s) +4); s = *k++ ^ r2; l1 ^= SBA(_ufc_sb3, s & 0xffff); l2 ^= SBA(_ufc_sb3, (s & 0xffff)+4); l1 ^= SBA(_ufc_sb2, s >>= 16); l2 ^= SBA(_ufc_sb2, (s) +4); s = *k++ ^ l1; r1 ^= SBA(_ufc_sb1, s & 0xffff); r2 ^= SBA(_ufc_sb1, (s & 0xffff)+4); r1 ^= SBA(_ufc_sb0, s >>= 16); r2 ^= SBA(_ufc_sb0, (s) +4); s = *k++ ^ l2; r1 ^= SBA(_ufc_sb3, s & 0xffff); r2 ^= SBA(_ufc_sb3, (s & 0xffff)+4); r1 ^= SBA(_ufc_sb2, s >>= 16); r2 ^= SBA(_ufc_sb2, (s) +4); } s=l1; l1=r1; r1=s; s=l2; l2=r2; r2=s; } return _ufc_dofinalperm(l1, l2, r1, r2); } #endif #ifdef _UFC_64_ /* * 64 bit version */ extern long64 _ufc_keytab[16]; extern long64 _ufc_sb0[], _ufc_sb1[], _ufc_sb2[], _ufc_sb3[]; #define SBA(sb, v) (*(long64*)((char*)(sb)+(v))) static ufc_long *_ufc_doit(ufc_long l1, ufc_long l2, ufc_long r1, ufc_long r2, ufc_long itr) { int i; long64 l, r, s, *k; l = (((long64)l1) << 32) | ((long64)l2); r = (((long64)r1) << 32) | ((long64)r2); while(itr--) { k = &_ufc_keytab[0]; for(i=8; i--; ) { s = *k++ ^ r; l ^= SBA(_ufc_sb3, (s >> 0) & 0xffff); l ^= SBA(_ufc_sb2, (s >> 16) & 0xffff); l ^= SBA(_ufc_sb1, (s >> 32) & 0xffff); l ^= SBA(_ufc_sb0, (s >> 48) & 0xffff); s = *k++ ^ l; r ^= SBA(_ufc_sb3, (s >> 0) & 0xffff); r ^= SBA(_ufc_sb2, (s >> 16) & 0xffff); r ^= SBA(_ufc_sb1, (s >> 32) & 0xffff); r ^= SBA(_ufc_sb0, (s >> 48) & 0xffff); } s=l; l=r; r=s; } l1 = l >> 32; l2 = l & 0xffffffff; r1 = r >> 32; r2 = r & 0xffffffff; return _ufc_dofinalperm(l1, l2, r1, r2); } #endif #else int ufc_dummy_procedure(void); int ufc_dummy_procedure(void) {return 0;} #endif tdb-1.4.2/lib/replace/cwrap.c0000660000000000000000000000226212406075657015702 0ustar rootroot00000000000000/* * Unix SMB/CIFS implementation. 
* * Replaceable functions by cwrap * * Copyright (c) 2014 Andreas Schneider * * ** NOTE! The following LGPL license applies to the replace * ** library. This does NOT imply that all of Samba is released * ** under the LGPL * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . */ #include "replace.h" bool nss_wrapper_enabled(void) { return false; } bool nss_wrapper_hosts_enabled(void) { return false; } bool socket_wrapper_enabled(void) { return false; } bool uid_wrapper_enabled(void) { return false; } tdb-1.4.2/lib/replace/dlfcn.c0000660000000000000000000000354312406075657015657 0ustar rootroot00000000000000/* Unix SMB/CIFS implementation. Samba system utilities Copyright (C) Andrew Tridgell 1992-1998 Copyright (C) Jeremy Allison 1998-2002 Copyright (C) Jelmer Vernooij 2006 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #include "replace.h" #ifdef HAVE_DL_H #include #endif #ifndef HAVE_DLOPEN #ifdef DLOPEN_TAKES_UNSIGNED_FLAGS void *rep_dlopen(const char *name, unsigned int flags) #else void *rep_dlopen(const char *name, int flags) #endif { #ifdef HAVE_SHL_LOAD if (name == NULL) return PROG_HANDLE; return (void *)shl_load(name, flags, 0); #else return NULL; #endif } #endif #ifndef HAVE_DLSYM void *rep_dlsym(void *handle, const char *symbol) { #ifdef HAVE_SHL_FINDSYM void *sym_addr; if (!shl_findsym((shl_t *)&handle, symbol, TYPE_UNDEFINED, &sym_addr)) return sym_addr; #endif return NULL; } #endif #ifndef HAVE_DLERROR char *rep_dlerror(void) { return "dynamic loading of objects not supported on this platform"; } #endif #ifndef HAVE_DLCLOSE int rep_dlclose(void *handle) { #ifdef HAVE_SHL_CLOSE return shl_unload((shl_t)handle); #else return 0; #endif } #endif tdb-1.4.2/lib/replace/getaddrinfo.c0000660000000000000000000002454412406075657017063 0ustar rootroot00000000000000/* PostgreSQL Database Management System (formerly known as Postgres, then as Postgres95) Portions Copyright (c) 1996-2005, The PostgreSQL Global Development Group Portions Copyright (c) 1994, The Regents of the University of California Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies. IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. */ /*------------------------------------------------------------------------- * * getaddrinfo.c * Support getaddrinfo() on platforms that don't have it. * * We also supply getnameinfo() here, assuming that the platform will have * it if and only if it has getaddrinfo(). If this proves false on some * platform, we'll need to split this file and provide a separate configure * test for getnameinfo(). * * Copyright (c) 2003-2007, PostgreSQL Global Development Group * * Copyright (C) 2007 Jeremy Allison. * Modified to return multiple IPv4 addresses for Samba. 
* *------------------------------------------------------------------------- */ #include "replace.h" #include "system/network.h" #ifndef SMB_MALLOC #define SMB_MALLOC(s) malloc(s) #endif #ifndef SMB_STRDUP #define SMB_STRDUP(s) strdup(s) #endif static int check_hostent_err(struct hostent *hp) { if (!hp) { switch (h_errno) { case HOST_NOT_FOUND: case NO_DATA: return EAI_NONAME; case TRY_AGAIN: return EAI_AGAIN; case NO_RECOVERY: default: return EAI_FAIL; } } if (!hp->h_name || hp->h_addrtype != AF_INET) { return EAI_FAIL; } return 0; } static char *canon_name_from_hostent(struct hostent *hp, int *perr) { char *ret = NULL; *perr = check_hostent_err(hp); if (*perr) { return NULL; } ret = SMB_STRDUP(hp->h_name); if (!ret) { *perr = EAI_MEMORY; } return ret; } static char *get_my_canon_name(int *perr) { char name[HOST_NAME_MAX+1]; if (gethostname(name, HOST_NAME_MAX) == -1) { *perr = EAI_FAIL; return NULL; } /* Ensure null termination. */ name[HOST_NAME_MAX] = '\0'; return canon_name_from_hostent(gethostbyname(name), perr); } static char *get_canon_name_from_addr(struct in_addr ip, int *perr) { return canon_name_from_hostent( gethostbyaddr(&ip, sizeof(ip), AF_INET), perr); } static struct addrinfo *alloc_entry(const struct addrinfo *hints, struct in_addr ip, unsigned short port) { struct sockaddr_in *psin = NULL; struct addrinfo *ai = SMB_MALLOC(sizeof(*ai)); if (!ai) { return NULL; } memset(ai, '\0', sizeof(*ai)); psin = SMB_MALLOC(sizeof(*psin)); if (!psin) { free(ai); return NULL; } memset(psin, '\0', sizeof(*psin)); psin->sin_family = AF_INET; psin->sin_port = htons(port); psin->sin_addr = ip; ai->ai_flags = 0; ai->ai_family = AF_INET; ai->ai_socktype = hints->ai_socktype; ai->ai_protocol = hints->ai_protocol; ai->ai_addrlen = sizeof(*psin); ai->ai_addr = (struct sockaddr *) psin; ai->ai_canonname = NULL; ai->ai_next = NULL; return ai; } /* * get address info for a single ipv4 address. * * Bugs: - servname can only be a number, not text. */ static int getaddr_info_single_addr(const char *service, uint32_t addr, const struct addrinfo *hints, struct addrinfo **res) { struct addrinfo *ai = NULL; struct in_addr ip; unsigned short port = 0; if (service) { port = (unsigned short)atoi(service); } ip.s_addr = htonl(addr); ai = alloc_entry(hints, ip, port); if (!ai) { return EAI_MEMORY; } /* If we're asked for the canonical name, * make sure it returns correctly. */ if (!(hints->ai_flags & AI_NUMERICSERV) && hints->ai_flags & AI_CANONNAME) { int err; if (addr == INADDR_LOOPBACK || addr == INADDR_ANY) { ai->ai_canonname = get_my_canon_name(&err); } else { ai->ai_canonname = get_canon_name_from_addr(ip,&err); } if (ai->ai_canonname == NULL) { freeaddrinfo(ai); return err; } } *res = ai; return 0; } /* * get address info for multiple ipv4 addresses. * * Bugs: - servname can only be a number, not text. 
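* (the string is converted with atoi(), so "445" works while a
* name such as "http" is silently treated as port 0)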
*/ static int getaddr_info_name(const char *node, const char *service, const struct addrinfo *hints, struct addrinfo **res) { struct addrinfo *listp = NULL, *prevp = NULL; char **pptr = NULL; int err; struct hostent *hp = NULL; unsigned short port = 0; if (service) { port = (unsigned short)atoi(service); } hp = gethostbyname(node); err = check_hostent_err(hp); if (err) { return err; } for(pptr = hp->h_addr_list; *pptr; pptr++) { struct in_addr ip = *(struct in_addr *)*pptr; struct addrinfo *ai = alloc_entry(hints, ip, port); if (!ai) { freeaddrinfo(listp); return EAI_MEMORY; } if (!listp) { listp = ai; prevp = ai; ai->ai_canonname = SMB_STRDUP(hp->h_name); if (!ai->ai_canonname) { freeaddrinfo(listp); return EAI_MEMORY; } } else { prevp->ai_next = ai; prevp = ai; } } *res = listp; return 0; } /* * get address info for ipv4 sockets. * * Bugs: - servname can only be a number, not text. */ int rep_getaddrinfo(const char *node, const char *service, const struct addrinfo * hintp, struct addrinfo ** res) { struct addrinfo hints; /* Setup the hints struct. */ if (hintp == NULL) { memset(&hints, 0, sizeof(hints)); hints.ai_family = AF_INET; hints.ai_socktype = SOCK_STREAM; } else { memcpy(&hints, hintp, sizeof(hints)); } if (hints.ai_family != AF_INET && hints.ai_family != AF_UNSPEC) { return EAI_FAMILY; } if (hints.ai_socktype == 0) { hints.ai_socktype = SOCK_STREAM; } if (!node && !service) { return EAI_NONAME; } if (node) { if (node[0] == '\0') { return getaddr_info_single_addr(service, INADDR_ANY, &hints, res); } else if (hints.ai_flags & AI_NUMERICHOST) { struct in_addr ip; if (!inet_aton(node, &ip)) { return EAI_FAIL; } return getaddr_info_single_addr(service, ntohl(ip.s_addr), &hints, res); } else { return getaddr_info_name(node, service, &hints, res); } } else if (hints.ai_flags & AI_PASSIVE) { return getaddr_info_single_addr(service, INADDR_ANY, &hints, res); } return getaddr_info_single_addr(service, INADDR_LOOPBACK, &hints, res); } void rep_freeaddrinfo(struct addrinfo *res) { struct addrinfo *next = NULL; for (;res; res = next) { next = res->ai_next; free(res->ai_canonname); free(res->ai_addr); free(res); } } const char *rep_gai_strerror(int errcode) { #ifdef HAVE_HSTRERROR int hcode; switch (errcode) { case EAI_NONAME: hcode = HOST_NOT_FOUND; break; case EAI_AGAIN: hcode = TRY_AGAIN; break; case EAI_FAIL: default: hcode = NO_RECOVERY; break; } return hstrerror(hcode); #else /* !HAVE_HSTRERROR */ switch (errcode) { case EAI_NONAME: return "Unknown host"; case EAI_AGAIN: return "Host name lookup failure"; #ifdef EAI_BADFLAGS case EAI_BADFLAGS: return "Invalid argument"; #endif #ifdef EAI_FAMILY case EAI_FAMILY: return "Address family not supported"; #endif #ifdef EAI_MEMORY case EAI_MEMORY: return "Not enough memory"; #endif #ifdef EAI_NODATA case EAI_NODATA: return "No host data of that type was found"; #endif #ifdef EAI_SERVICE case EAI_SERVICE: return "Class type not found"; #endif #ifdef EAI_SOCKTYPE case EAI_SOCKTYPE: return "Socket type not supported"; #endif default: return "Unknown server error"; } #endif /* HAVE_HSTRERROR */ } static int gethostnameinfo(const struct sockaddr *sa, char *node, size_t nodelen, int flags) { int ret = -1; char *p = NULL; if (!(flags & NI_NUMERICHOST)) { struct hostent *hp = gethostbyaddr( &((struct sockaddr_in *)sa)->sin_addr, sizeof(struct in_addr), sa->sa_family); ret = check_hostent_err(hp); if (ret == 0) { /* Name looked up successfully. 
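* Copy it to the caller's buffer; NI_NOFQDN additionally trims
* the name to the part before the first dot.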
*/ ret = snprintf(node, nodelen, "%s", hp->h_name); if (ret < 0 || (size_t)ret >= nodelen) { return EAI_MEMORY; } if (flags & NI_NOFQDN) { p = strchr(node,'.'); if (p) { *p = '\0'; } } return 0; } if (flags & NI_NAMEREQD) { /* If we require a name and didn't get one, * automatically fail. */ return ret; } /* Otherwise just fall into the numeric host code... */ } p = inet_ntoa(((struct sockaddr_in *)sa)->sin_addr); ret = snprintf(node, nodelen, "%s", p); if (ret < 0 || (size_t)ret >= nodelen) { return EAI_MEMORY; } return 0; } static int getservicenameinfo(const struct sockaddr *sa, char *service, size_t servicelen, int flags) { int ret = -1; int port = ntohs(((struct sockaddr_in *)sa)->sin_port); if (!(flags & NI_NUMERICSERV)) { struct servent *se = getservbyport( port, (flags & NI_DGRAM) ? "udp" : "tcp"); if (se && se->s_name) { /* Service name looked up successfully. */ ret = snprintf(service, servicelen, "%s", se->s_name); if (ret < 0 || (size_t)ret >= servicelen) { return EAI_MEMORY; } return 0; } /* Otherwise just fall into the numeric service code... */ } ret = snprintf(service, servicelen, "%d", port); if (ret < 0 || (size_t)ret >= servicelen) { return EAI_MEMORY; } return 0; } /* * Convert an ipv4 address to a hostname. * * Bugs: - No IPv6 support. */ int rep_getnameinfo(const struct sockaddr *sa, socklen_t salen, char *node, size_t nodelen, char *service, size_t servicelen, int flags) { /* Invalid arguments. */ if (sa == NULL || (node == NULL && service == NULL)) { return EAI_FAIL; } if (sa->sa_family != AF_INET) { return EAI_FAIL; } if (salen < sizeof(struct sockaddr_in)) { return EAI_FAIL; } if (node) { return gethostnameinfo(sa, node, nodelen, flags); } if (service) { return getservicenameinfo(sa, service, servicelen, flags); } return 0; } tdb-1.4.2/lib/replace/getaddrinfo.h0000660000000000000000000000614112406075657017061 0ustar rootroot00000000000000/* PostgreSQL Database Management System (formerly known as Postgres, then as Postgres95) Portions Copyright (c) 1996-2005, The PostgreSQL Global Development Group Portions Copyright (c) 1994, The Regents of the University of California Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies. IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. */ /*------------------------------------------------------------------------- * * getaddrinfo.h * Support getaddrinfo() on platforms that don't have it. * * Note: we use our own routines on platforms that don't HAVE_STRUCT_ADDRINFO, * whether or not the library routine getaddrinfo() can be found. 
This * policy is needed because on some platforms a manually installed libbind.a * may provide getaddrinfo(), yet the system headers may not provide the * struct definitions needed to call it. To avoid conflict with the libbind * definition in such cases, we rename our routines to pg_xxx() via macros. * in lib/replace we use rep_xxx() * This code will also work on platforms where struct addrinfo is defined * in the system headers but no getaddrinfo() can be located. * * Copyright (c) 2003-2007, PostgreSQL Global Development Group * *------------------------------------------------------------------------- */ #ifndef GETADDRINFO_H #define GETADDRINFO_H #ifndef HAVE_GETADDRINFO /* Rename private copies per comments above */ #ifdef getaddrinfo #undef getaddrinfo #endif #define getaddrinfo rep_getaddrinfo #define HAVE_GETADDRINFO #ifdef freeaddrinfo #undef freeaddrinfo #endif #define freeaddrinfo rep_freeaddrinfo #define HAVE_FREEADDRINFO #ifdef gai_strerror #undef gai_strerror #endif #define gai_strerror rep_gai_strerror #define HAVE_GAI_STRERROR #ifdef getnameinfo #undef getnameinfo #endif #define getnameinfo rep_getnameinfo #ifndef HAVE_GETNAMEINFO #define HAVE_GETNAMEINFO #endif extern int rep_getaddrinfo(const char *node, const char *service, const struct addrinfo * hints, struct addrinfo ** res); extern void rep_freeaddrinfo(struct addrinfo * res); extern const char *rep_gai_strerror(int errcode); extern int rep_getnameinfo(const struct sockaddr * sa, socklen_t salen, char *node, size_t nodelen, char *service, size_t servicelen, int flags); #endif /* HAVE_GETADDRINFO */ #endif /* GETADDRINFO_H */ tdb-1.4.2/lib/replace/getifaddrs.c0000660000000000000000000002046013444661620016674 0ustar rootroot00000000000000/* Unix SMB/CIFS implementation. Samba utility functions Copyright (C) Andrew Tridgell 1998 Copyright (C) Jeremy Allison 2007 Copyright (C) Jelmer Vernooij 2007 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "replace.h" #include "system/network.h" #include #include #include #ifdef HAVE_SYS_TIME_H #include #endif #ifndef SIOCGIFCONF #ifdef HAVE_SYS_SOCKIO_H #include #endif #endif #ifdef HAVE_IFACE_GETIFADDRS #define _FOUND_IFACE_ANY #else void rep_freeifaddrs(struct ifaddrs *ifp) { if (ifp != NULL) { free(ifp->ifa_name); free(ifp->ifa_addr); free(ifp->ifa_netmask); free(ifp->ifa_dstaddr); freeifaddrs(ifp->ifa_next); free(ifp); } } static struct sockaddr *sockaddr_dup(struct sockaddr *sa) { struct sockaddr *ret; socklen_t socklen; #ifdef HAVE_SOCKADDR_SA_LEN socklen = sa->sa_len; #else socklen = sizeof(struct sockaddr_storage); #endif ret = calloc(1, socklen); if (ret == NULL) return NULL; memcpy(ret, sa, socklen); return ret; } #endif #ifdef HAVE_IFACE_IFCONF /* this works for Linux 2.2, Solaris 2.5, SunOS4, HPUX 10.20, OSF1 V4.0, Ultrix 4.4, SCO Unix 3.2, IRIX 6.4 and FreeBSD 3.2. 
It probably also works on any BSD style system. */ int rep_getifaddrs(struct ifaddrs **ifap) { struct ifconf ifc; char buff[8192]; int fd, i, n; struct ifreq *ifr=NULL; struct ifaddrs *curif; struct ifaddrs *lastif = NULL; *ifap = NULL; if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) == -1) { return -1; } ifc.ifc_len = sizeof(buff); ifc.ifc_buf = buff; if (ioctl(fd, SIOCGIFCONF, &ifc) != 0) { close(fd); return -1; } ifr = ifc.ifc_req; n = ifc.ifc_len / sizeof(struct ifreq); /* Loop through interfaces, looking for given IP address */ for (i=n-1; i>=0; i--) { if (ioctl(fd, SIOCGIFFLAGS, &ifr[i]) == -1) { freeifaddrs(*ifap); close(fd); return -1; } curif = calloc(1, sizeof(struct ifaddrs)); if (curif == NULL) { freeifaddrs(*ifap); close(fd); return -1; } curif->ifa_name = strdup(ifr[i].ifr_name); if (curif->ifa_name == NULL) { free(curif); freeifaddrs(*ifap); close(fd); return -1; } curif->ifa_flags = ifr[i].ifr_flags; curif->ifa_dstaddr = NULL; curif->ifa_data = NULL; curif->ifa_next = NULL; curif->ifa_addr = NULL; if (ioctl(fd, SIOCGIFADDR, &ifr[i]) != -1) { curif->ifa_addr = sockaddr_dup(&ifr[i].ifr_addr); if (curif->ifa_addr == NULL) { free(curif->ifa_name); free(curif); freeifaddrs(*ifap); close(fd); return -1; } } curif->ifa_netmask = NULL; if (ioctl(fd, SIOCGIFNETMASK, &ifr[i]) != -1) { curif->ifa_netmask = sockaddr_dup(&ifr[i].ifr_addr); if (curif->ifa_netmask == NULL) { if (curif->ifa_addr != NULL) { free(curif->ifa_addr); } free(curif->ifa_name); free(curif); freeifaddrs(*ifap); close(fd); return -1; } } if (lastif == NULL) { *ifap = curif; } else { lastif->ifa_next = curif; } lastif = curif; } close(fd); return 0; } #define _FOUND_IFACE_ANY #endif /* HAVE_IFACE_IFCONF */ #ifdef HAVE_IFACE_IFREQ #ifndef I_STR #include #endif /**************************************************************************** this should cover most of the streams based systems Thanks to Andrej.Borsenkow@mow.siemens.ru for several ideas in this code ****************************************************************************/ int rep_getifaddrs(struct ifaddrs **ifap) { struct ifreq ifreq; struct strioctl strioctl; char buff[8192]; int fd, i, n; struct ifreq *ifr=NULL; struct ifaddrs *curif; struct ifaddrs *lastif = NULL; *ifap = NULL; if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) == -1) { return -1; } strioctl.ic_cmd = SIOCGIFCONF; strioctl.ic_dp = buff; strioctl.ic_len = sizeof(buff); if (ioctl(fd, I_STR, &strioctl) < 0) { close(fd); return -1; } /* we can ignore the possible sizeof(int) here as the resulting number of interface structures won't change */ n = strioctl.ic_len / sizeof(struct ifreq); /* we will assume that the kernel returns the length as an int at the start of the buffer if the offered size is a multiple of the structure size plus an int */ if (n*sizeof(struct ifreq) + sizeof(int) == strioctl.ic_len) { ifr = (struct ifreq *)(buff + sizeof(int)); } else { ifr = (struct ifreq *)buff; } /* Loop through interfaces */ for (i = 0; iifa_next = curif; } strioctl.ic_cmd = SIOCGIFFLAGS; strioctl.ic_dp = (char *)&ifreq; strioctl.ic_len = sizeof(struct ifreq); if (ioctl(fd, I_STR, &strioctl) != 0) { freeifaddrs(*ifap); return -1; } curif->ifa_flags = ifreq.ifr_flags; strioctl.ic_cmd = SIOCGIFADDR; strioctl.ic_dp = (char *)&ifreq; strioctl.ic_len = sizeof(struct ifreq); if (ioctl(fd, I_STR, &strioctl) != 0) { freeifaddrs(*ifap); return -1; } curif->ifa_name = strdup(ifreq.ifr_name); curif->ifa_addr = sockaddr_dup(&ifreq.ifr_addr); curif->ifa_dstaddr = NULL; curif->ifa_data = NULL; curif->ifa_next = NULL; 
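/* The netmask needs its own SIOCGIFNETMASK request, again issued
 * through the STREAMS I_STR ioctl. */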
		curif->ifa_netmask = NULL;
		strioctl.ic_cmd = SIOCGIFNETMASK;
		strioctl.ic_dp = (char *)&ifreq;
		strioctl.ic_len = sizeof(struct ifreq);
		if (ioctl(fd, I_STR, &strioctl) != 0) {
			freeifaddrs(*ifap);
			close(fd);
			return -1;
		}
		curif->ifa_netmask = sockaddr_dup(&ifreq.ifr_addr);

		lastif = curif;
	}

	close(fd);

	return 0;
}
#define _FOUND_IFACE_ANY
#endif /* HAVE_IFACE_IFREQ */

#ifdef HAVE_IFACE_AIX

/****************************************************************************
this one is for AIX (tested on 4.2)
****************************************************************************/
int rep_getifaddrs(struct ifaddrs **ifap)
{
	char buff[8192];
	int fd, i;
	struct ifconf ifc;
	struct ifreq *ifr=NULL;
	struct ifaddrs *curif;
	struct ifaddrs *lastif = NULL;

	*ifap = NULL;

	if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) == -1) {
		return -1;
	}

	ifc.ifc_len = sizeof(buff);
	ifc.ifc_buf = buff;

	if (ioctl(fd, SIOCGIFCONF, &ifc) != 0) {
		close(fd);
		return -1;
	}

	ifr = ifc.ifc_req;

	/* Loop through interfaces */
	i = ifc.ifc_len;

	while (i > 0) {
		unsigned int inc;

		inc = ifr->ifr_addr.sa_len;

		if (ioctl(fd, SIOCGIFADDR, ifr) != 0) {
			freeifaddrs(*ifap);
			close(fd);
			return -1;
		}

		curif = calloc(1, sizeof(struct ifaddrs));
		if (curif == NULL) {
			freeifaddrs(*ifap);
			close(fd);
			return -1;
		}
		if (lastif == NULL) {
			*ifap = curif;
		} else {
			lastif->ifa_next = curif;
		}

		curif->ifa_name = strdup(ifr->ifr_name);
		curif->ifa_addr = sockaddr_dup(&ifr->ifr_addr);
		curif->ifa_dstaddr = NULL;
		curif->ifa_data = NULL;
		curif->ifa_netmask = NULL;
		curif->ifa_next = NULL;

		if (ioctl(fd, SIOCGIFFLAGS, ifr) != 0) {
			freeifaddrs(*ifap);
			close(fd);
			return -1;
		}

		curif->ifa_flags = ifr->ifr_flags;

		if (ioctl(fd, SIOCGIFNETMASK, ifr) != 0) {
			freeifaddrs(*ifap);
			close(fd);
			return -1;
		}

		curif->ifa_netmask = sockaddr_dup(&ifr->ifr_addr);

		lastif = curif;

	next:
		/*
		 * Patch from Archie Cobbs (archie@whistle.com). The
		 * addresses in the SIOCGIFCONF interface list have a
		 * minimum size. Usually this doesn't matter, but if
		 * your machine has tunnel interfaces, etc. that have
		 * a zero length "link address", this does matter.
		 */

		if (inc < sizeof(ifr->ifr_addr))
			inc = sizeof(ifr->ifr_addr);
		inc += IFNAMSIZ;

		ifr = (struct ifreq*) (((char*) ifr) + inc);
		i -= inc;
	}

	close(fd);

	return 0;
}

#define _FOUND_IFACE_ANY
#endif /* HAVE_IFACE_AIX */

#ifndef _FOUND_IFACE_ANY
int rep_getifaddrs(struct ifaddrs **ifap)
{
	errno = ENOSYS;
	return -1;
}
#endif
tdb-1.4.2/lib/replace/hdr_replace.h0000660000000000000000000000012412406075657017036 0ustar rootroot00000000000000/* this is a replacement header for a missing system header */
#include "replace.h"
tdb-1.4.2/lib/replace/inet_aton.c0000660000000000000000000000223212406075657016543 0ustar rootroot00000000000000/*
 * Unix SMB/CIFS implementation.
 * replacement functions
 * Copyright (C) Michael Adam 2008
 *
 * ** NOTE! The following LGPL license applies to the replace
 * ** library. This does NOT imply that all of Samba is released
 * ** under the LGPL
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 3 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ #include "replace.h" #include "system/network.h" /** * We know that we have inet_pton from earlier libreplace checks. */ int rep_inet_aton(const char *src, struct in_addr *dst) { return (inet_pton(AF_INET, src, dst) > 0) ? 1 : 0; } tdb-1.4.2/lib/replace/inet_ntoa.c0000660000000000000000000000247512406075657016554 0ustar rootroot00000000000000/* * Unix SMB/CIFS implementation. * replacement routines for broken systems * Copyright (C) Andrew Tridgell 2003 * Copyright (C) Michael Adam 2008 * * ** NOTE! The following LGPL license applies to the replace * ** library. This does NOT imply that all of Samba is released * ** under the LGPL * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . */ #include "replace.h" #include "system/network.h" /** * NOTE: this is not thread safe, but it can't be, either * since it returns a pointer to static memory. */ char *rep_inet_ntoa(struct in_addr ip) { uint8_t *p = (uint8_t *)&ip.s_addr; static char buf[18]; slprintf(buf, 17, "%d.%d.%d.%d", (int)p[0], (int)p[1], (int)p[2], (int)p[3]); return buf; } tdb-1.4.2/lib/replace/inet_ntop.c0000660000000000000000000001163212406075657016566 0ustar rootroot00000000000000/* * Copyright (C) 1996-2001 Internet Software Consortium. * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM * DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL * INTERNET SOFTWARE CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "replace.h" #include "system/network.h" #define NS_INT16SZ 2 #define NS_IN6ADDRSZ 16 /* * WARNING: Don't even consider trying to compile this on a system where * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX. */ static const char *inet_ntop4(const unsigned char *src, char *dst, socklen_t size); #ifdef AF_INET6 static const char *inet_ntop6(const unsigned char *src, char *dst, socklen_t size); #endif /* char * * isc_net_ntop(af, src, dst, size) * convert a network format address to presentation format. * return: * pointer to presentation format address (`dst'), or NULL (see errno). * author: * Paul Vixie, 1996. 
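 * usage:
 *	a hypothetical caller sketch (illustrative only, not part of this
 *	file; "buf" and "a" are made-up names):
 *		char buf[sizeof "255.255.255.255"];
 *		struct in_addr a;
 *		a.s_addr = htonl(0x7f000001);
 *		if (inet_ntop(AF_INET, &a, buf, sizeof buf) != NULL)
 *			buf now holds "127.0.0.1"
 *	on failure NULL is returned and errno is set to EAFNOSUPPORT
 *	(unsupported family) or ENOSPC (dst too small).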
*/ const char * rep_inet_ntop(int af, const void *src, char *dst, socklen_t size) { switch (af) { case AF_INET: return (inet_ntop4(src, dst, size)); #ifdef AF_INET6 case AF_INET6: return (inet_ntop6(src, dst, size)); #endif default: errno = EAFNOSUPPORT; return (NULL); } /* NOTREACHED */ } /* const char * * inet_ntop4(src, dst, size) * format an IPv4 address * return: * `dst' (as a const) * notes: * (1) uses no statics * (2) takes a unsigned char* not an in_addr as input * author: * Paul Vixie, 1996. */ static const char * inet_ntop4(const unsigned char *src, char *dst, socklen_t size) { static const char *fmt = "%u.%u.%u.%u"; char tmp[sizeof "255.255.255.255"]; size_t len; len = snprintf(tmp, sizeof tmp, fmt, src[0], src[1], src[2], src[3]); if (len >= size) { errno = ENOSPC; return (NULL); } memcpy(dst, tmp, len + 1); return (dst); } /* const char * * isc_inet_ntop6(src, dst, size) * convert IPv6 binary address into presentation (printable) format * author: * Paul Vixie, 1996. */ #ifdef AF_INET6 static const char * inet_ntop6(const unsigned char *src, char *dst, socklen_t size) { /* * Note that int32_t and int16_t need only be "at least" large enough * to contain a value of the specified size. On some systems, like * Crays, there is no such thing as an integer variable with 16 bits. * Keep this in mind if you think this function should have been coded * to use pointer overlays. All the world's not a VAX. */ char tmp[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255"], *tp; struct { int base, len; } best, cur; unsigned int words[NS_IN6ADDRSZ / NS_INT16SZ]; int i, inc; /* * Preprocess: * Copy the input (bytewise) array into a wordwise array. * Find the longest run of 0x00's in src[] for :: shorthanding. */ memset(words, '\0', sizeof words); for (i = 0; i < NS_IN6ADDRSZ; i++) words[i / 2] |= (src[i] << ((1 - (i % 2)) << 3)); best.base = -1; best.len = 0; cur.base = -1; cur.len = 0; for (i = 0; i < (NS_IN6ADDRSZ / NS_INT16SZ); i++) { if (words[i] == 0) { if (cur.base == -1) cur.base = i, cur.len = 1; else cur.len++; } else { if (cur.base != -1) { if (best.base == -1 || cur.len > best.len) best = cur; cur.base = -1; } } } if (cur.base != -1) { if (best.base == -1 || cur.len > best.len) best = cur; } if (best.base != -1 && best.len < 2) best.base = -1; /* * Format the result. */ tp = tmp; for (i = 0; i < (NS_IN6ADDRSZ / NS_INT16SZ); i++) { /* Are we inside the best run of 0x00's? */ if (best.base != -1 && i >= best.base && i < (best.base + best.len)) { if (i == best.base) *tp++ = ':'; continue; } /* Are we following an initial run of 0x00s or any real hex? */ if (i != 0) *tp++ = ':'; /* Is this address an encapsulated IPv4? */ if (i == 6 && best.base == 0 && (best.len == 6 || (best.len == 5 && words[5] == 0xffff))) { if (!inet_ntop4(src+12, tp, sizeof tmp - (tp - tmp))) return (NULL); tp += strlen(tp); break; } inc = snprintf(tp, 5, "%x", words[i]); if (inc >= 5) { abort(); } tp += inc; } /* Was it a trailing run of 0x00's? */ if (best.base != -1 && (best.base + best.len) == (NS_IN6ADDRSZ / NS_INT16SZ)) *tp++ = ':'; *tp++ = '\0'; /* * Check for overflow, copy, and we're done. */ if ((size_t)(tp - tmp) > size) { errno = ENOSPC; return (NULL); } memcpy(dst, tmp, tp - tmp); return (dst); } #endif /* AF_INET6 */ tdb-1.4.2/lib/replace/inet_pton.c0000660000000000000000000001201012406075657016555 0ustar rootroot00000000000000/* * Copyright (C) 1996-2001 Internet Software Consortium. 
* * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM * DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL * INTERNET SOFTWARE CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "replace.h" #include "system/network.h" #define NS_INT16SZ 2 #define NS_INADDRSZ 4 #define NS_IN6ADDRSZ 16 /* * WARNING: Don't even consider trying to compile this on a system where * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX. */ static int inet_pton4(const char *src, unsigned char *dst); #ifdef AF_INET6 static int inet_pton6(const char *src, unsigned char *dst); #endif /* int * inet_pton(af, src, dst) * convert from presentation format (which usually means ASCII printable) * to network format (which is usually some kind of binary format). * return: * 1 if the address was valid for the specified address family * 0 if the address wasn't valid (`dst' is untouched in this case) * -1 if some other error occurred (`dst' is untouched in this case, too) * author: * Paul Vixie, 1996. */ int rep_inet_pton(int af, const char *src, void *dst) { switch (af) { case AF_INET: return (inet_pton4(src, dst)); #ifdef AF_INET6 case AF_INET6: return (inet_pton6(src, dst)); #endif default: errno = EAFNOSUPPORT; return (-1); } /* NOTREACHED */ } /* int * inet_pton4(src, dst) * like inet_aton() but without all the hexadecimal and shorthand. * return: * 1 if `src' is a valid dotted quad, else 0. * notice: * does not touch `dst' unless it's returning 1. * author: * Paul Vixie, 1996. */ static int inet_pton4(src, dst) const char *src; unsigned char *dst; { static const char digits[] = "0123456789"; int saw_digit, octets, ch; unsigned char tmp[NS_INADDRSZ], *tp; saw_digit = 0; octets = 0; *(tp = tmp) = 0; while ((ch = *src++) != '\0') { const char *pch; if ((pch = strchr(digits, ch)) != NULL) { unsigned int new = *tp * 10 + (pch - digits); if (new > 255) return (0); *tp = new; if (! saw_digit) { if (++octets > 4) return (0); saw_digit = 1; } } else if (ch == '.' && saw_digit) { if (octets == 4) return (0); *++tp = 0; saw_digit = 0; } else return (0); } if (octets < 4) return (0); memcpy(dst, tmp, NS_INADDRSZ); return (1); } /* int * inet_pton6(src, dst) * convert presentation level address to network order binary form. * return: * 1 if `src' is a valid [RFC1884 2.2] address, else 0. * notice: * (1) does not touch `dst' unless it's returning 1. * (2) :: in a full address is silently ignored. * credit: * inspired by Mark Andrews. * author: * Paul Vixie, 1996. */ #ifdef AF_INET6 static int inet_pton6(src, dst) const char *src; unsigned char *dst; { static const char xdigits_l[] = "0123456789abcdef", xdigits_u[] = "0123456789ABCDEF"; unsigned char tmp[NS_IN6ADDRSZ], *tp, *endp, *colonp; const char *xdigits, *curtok; int ch, saw_xdigit; unsigned int val; memset((tp = tmp), '\0', NS_IN6ADDRSZ); endp = tp + NS_IN6ADDRSZ; colonp = NULL; /* Leading :: requires some special handling. 
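	   A lone leading ':' is only legal as the first half of "::", so
	   we step past the first ':' below and let the main loop see the
	   second one, which records the gap position (colonp) used for
	   the closing shift-by-hand.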
*/ if (*src == ':') if (*++src != ':') return (0); curtok = src; saw_xdigit = 0; val = 0; while ((ch = *src++) != '\0') { const char *pch; if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL) pch = strchr((xdigits = xdigits_u), ch); if (pch != NULL) { val <<= 4; val |= (pch - xdigits); if (val > 0xffff) return (0); saw_xdigit = 1; continue; } if (ch == ':') { curtok = src; if (!saw_xdigit) { if (colonp) return (0); colonp = tp; continue; } if (tp + NS_INT16SZ > endp) return (0); *tp++ = (unsigned char) (val >> 8) & 0xff; *tp++ = (unsigned char) val & 0xff; saw_xdigit = 0; val = 0; continue; } if (ch == '.' && ((tp + NS_INADDRSZ) <= endp) && inet_pton4(curtok, tp) > 0) { tp += NS_INADDRSZ; saw_xdigit = 0; break; /* '\0' was seen by inet_pton4(). */ } return (0); } if (saw_xdigit) { if (tp + NS_INT16SZ > endp) return (0); *tp++ = (unsigned char) (val >> 8) & 0xff; *tp++ = (unsigned char) val & 0xff; } if (colonp != NULL) { /* * Since some memmove()'s erroneously fail to handle * overlapping regions, we'll do the shift by hand. */ const int n = tp - colonp; int i; for (i = 1; i <= n; i++) { endp[- i] = colonp[n - i]; colonp[n - i] = 0; } tp = endp; } if (tp != endp) return (0); memcpy(dst, tmp, NS_IN6ADDRSZ); return (1); } #endif tdb-1.4.2/lib/replace/poll.c0000660000000000000000000000640512406075657015537 0ustar rootroot00000000000000/* Unix SMB/CIFS implementation. poll.c - poll wrapper This file is based on code from libssh (LGPLv2.1+ at the time it was downloaded), thus the following copyrights: Copyright (c) 2009-2010 by Andreas Schneider Copyright (c) 2003-2009 by Aris Adamantiadis Copyright (c) 2009 Aleksandar Kanchev Copyright (C) Volker Lendecke 2011 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #include "replace.h" #include "system/select.h" #ifdef HAVE_SYS_TIME_H #include #endif #ifdef HAVE_SYS_IOCTL_H #include #endif int rep_poll(struct pollfd *fds, nfds_t nfds, int timeout) { fd_set rfds, wfds, efds; struct timeval tv, *ptv; int max_fd; int rc; nfds_t i; if ((fds == NULL) && (nfds != 0)) { errno = EFAULT; return -1; } FD_ZERO(&rfds); FD_ZERO(&wfds); FD_ZERO(&efds); rc = 0; max_fd = 0; /* compute fd_sets and find largest descriptor */ for (i = 0; i < nfds; i++) { if ((fds[i].fd < 0) || (fds[i].fd >= FD_SETSIZE)) { fds[i].revents = POLLNVAL; continue; } if (fds[i].events & (POLLIN | POLLRDNORM)) { FD_SET(fds[i].fd, &rfds); } if (fds[i].events & (POLLOUT | POLLWRNORM | POLLWRBAND)) { FD_SET(fds[i].fd, &wfds); } if (fds[i].events & (POLLPRI | POLLRDBAND)) { FD_SET(fds[i].fd, &efds); } if (fds[i].fd > max_fd && (fds[i].events & (POLLIN | POLLOUT | POLLPRI | POLLRDNORM | POLLRDBAND | POLLWRNORM | POLLWRBAND))) { max_fd = fds[i].fd; } } if (timeout < 0) { ptv = NULL; } else { ptv = &tv; if (timeout == 0) { tv.tv_sec = 0; tv.tv_usec = 0; } else { tv.tv_sec = timeout / 1000; tv.tv_usec = (timeout % 1000) * 1000; } } rc = select(max_fd + 1, &rfds, &wfds, &efds, ptv); if (rc < 0) { return -1; } for (rc = 0, i = 0; i < nfds; i++) { if ((fds[i].fd < 0) || (fds[i].fd >= FD_SETSIZE)) { continue; } fds[i].revents = 0; if (FD_ISSET(fds[i].fd, &rfds)) { int err = errno; int available = 0; int ret; /* support for POLLHUP */ ret = ioctl(fds[i].fd, FIONREAD, &available); if ((ret == -1) || (available == 0)) { fds[i].revents |= POLLHUP; } else { fds[i].revents |= fds[i].events & (POLLIN | POLLRDNORM); } errno = err; } if (FD_ISSET(fds[i].fd, &wfds)) { fds[i].revents |= fds[i].events & (POLLOUT | POLLWRNORM | POLLWRBAND); } if (FD_ISSET(fds[i].fd, &efds)) { fds[i].revents |= fds[i].events & (POLLPRI | POLLRDBAND); } if (fds[i].revents & ~POLLHUP) { rc++; } } return rc; } tdb-1.4.2/lib/replace/replace-test.h0000660000000000000000000000033712406075657017164 0ustar rootroot00000000000000#ifndef __LIB_REPLACE_REPLACE_TEST_H__ #define __LIB_REPLACE_REPLACE_TEST_H__ int libreplace_test_strptime(void); int test_readdir_os2_delete(void); int getifaddrs_test(void); #endif /* __LIB_REPLACE_REPLACE_TEST_H__ */ tdb-1.4.2/lib/replace/replace-testsuite.h0000660000000000000000000000036312406075657020235 0ustar rootroot00000000000000#ifndef __LIB_REPLACE_REPLACE_TESTSUITE_H__ #define __LIB_REPLACE_REPLACE_TESTSUITE_H__ #include struct torture_context; bool torture_local_replace(struct torture_context *ctx); #endif /* __LIB_REPLACE_REPLACE_TESTSUITE_H__ */ tdb-1.4.2/lib/replace/replace.c0000660000000000000000000005247513444661620016206 0ustar rootroot00000000000000/* Unix SMB/CIFS implementation. replacement routines for broken systems Copyright (C) Andrew Tridgell 1992-1998 Copyright (C) Jelmer Vernooij 2005-2008 Copyright (C) Matthieu Patou 2010 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "replace.h" #include "system/filesys.h" #include "system/time.h" #include "system/network.h" #include "system/passwd.h" #include "system/syslog.h" #include "system/locale.h" #include "system/wait.h" #ifdef _WIN32 #define mkdir(d,m) _mkdir(d) #endif void replace_dummy(void); void replace_dummy(void) {} #ifndef HAVE_FTRUNCATE /******************************************************************* ftruncate for operating systems that don't have it ********************************************************************/ int rep_ftruncate(int f, off_t l) { #ifdef HAVE_CHSIZE return chsize(f,l); #elif defined(F_FREESP) struct flock fl; fl.l_whence = 0; fl.l_len = 0; fl.l_start = l; fl.l_type = F_WRLCK; return fcntl(f, F_FREESP, &fl); #else #error "you must have a ftruncate function" #endif } #endif /* HAVE_FTRUNCATE */ #ifndef HAVE_STRLCPY /* * Like strncpy but does not 0 fill the buffer and always null * terminates. bufsize is the size of the destination buffer. * Returns the length of s. */ size_t rep_strlcpy(char *d, const char *s, size_t bufsize) { size_t len = strlen(s); size_t ret = len; if (bufsize <= 0) { return 0; } if (len >= bufsize) { len = bufsize - 1; } memcpy(d, s, len); d[len] = 0; return ret; } #endif #ifndef HAVE_STRLCAT /* like strncat but does not 0 fill the buffer and always null terminates. bufsize is the length of the buffer, which should be one more than the maximum resulting string length */ size_t rep_strlcat(char *d, const char *s, size_t bufsize) { size_t len1 = strnlen(d, bufsize); size_t len2 = strlen(s); size_t ret = len1 + len2; if (len1+len2 >= bufsize) { if (bufsize < (len1+1)) { return ret; } len2 = bufsize - (len1+1); } if (len2 > 0) { memcpy(d+len1, s, len2); d[len1+len2] = 0; } return ret; } #endif #ifndef HAVE_MKTIME /******************************************************************* a mktime() replacement for those who don't have it - contributed by C.A. Lademann Corrections by richard.kettlewell@kewill.com ********************************************************************/ #define MINUTE 60 #define HOUR 60*MINUTE #define DAY 24*HOUR #define YEAR 365*DAY time_t rep_mktime(struct tm *t) { struct tm *u; time_t epoch = 0; int n; int mon [] = { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }, y, m, i; if(t->tm_year < 70) return((time_t)-1); n = t->tm_year + 1900 - 1; epoch = (t->tm_year - 70) * YEAR + ((n / 4 - n / 100 + n / 400) - (1969 / 4 - 1969 / 100 + 1969 / 400)) * DAY; y = t->tm_year + 1900; m = 0; for(i = 0; i < t->tm_mon; i++) { epoch += mon [m] * DAY; if(m == 1 && y % 4 == 0 && (y % 100 != 0 || y % 400 == 0)) epoch += DAY; if(++m > 11) { m = 0; y++; } } epoch += (t->tm_mday - 1) * DAY; epoch += t->tm_hour * HOUR + t->tm_min * MINUTE + t->tm_sec; if((u = localtime(&epoch)) != NULL) { t->tm_sec = u->tm_sec; t->tm_min = u->tm_min; t->tm_hour = u->tm_hour; t->tm_mday = u->tm_mday; t->tm_mon = u->tm_mon; t->tm_year = u->tm_year; t->tm_wday = u->tm_wday; t->tm_yday = u->tm_yday; t->tm_isdst = u->tm_isdst; } return(epoch); } #endif /* !HAVE_MKTIME */ #ifndef HAVE_INITGROUPS /**************************************************************************** some systems don't have an initgroups call ****************************************************************************/ int rep_initgroups(char *name, gid_t id) { #ifndef HAVE_SETGROUPS /* yikes! no SETGROUPS or INITGROUPS? how can this work? 
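	   It can't: with neither setgroups() nor a native initgroups()
	   there is no way to install a supplementary group list, so we
	   fail with ENOSYS and let the caller decide.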
*/ errno = ENOSYS; return -1; #else /* HAVE_SETGROUPS */ #include gid_t *grouplst = NULL; int max_gr = NGROUPS_MAX; int ret; int i,j; struct group *g; char *gr; if((grouplst = malloc(sizeof(gid_t) * max_gr)) == NULL) { errno = ENOMEM; return -1; } grouplst[0] = id; i = 1; while (i < max_gr && ((g = (struct group *)getgrent()) != (struct group *)NULL)) { if (g->gr_gid == id) continue; j = 0; gr = g->gr_mem[0]; while (gr && (*gr != (char)NULL)) { if (strcmp(name,gr) == 0) { grouplst[i] = g->gr_gid; i++; gr = (char *)NULL; break; } gr = g->gr_mem[++j]; } } endgrent(); ret = setgroups(i, grouplst); free(grouplst); return ret; #endif /* HAVE_SETGROUPS */ } #endif /* HAVE_INITGROUPS */ #ifndef HAVE_MEMMOVE /******************************************************************* safely copies memory, ensuring no overlap problems. this is only used if the machine does not have its own memmove(). this is not the fastest algorithm in town, but it will do for our needs. ********************************************************************/ void *rep_memmove(void *dest,const void *src,int size) { unsigned long d,s; int i; if (dest==src || !size) return(dest); d = (unsigned long)dest; s = (unsigned long)src; if ((d >= (s+size)) || (s >= (d+size))) { /* no overlap */ memcpy(dest,src,size); return(dest); } if (d < s) { /* we can forward copy */ if (s-d >= sizeof(int) && !(s%sizeof(int)) && !(d%sizeof(int)) && !(size%sizeof(int))) { /* do it all as words */ int *idest = (int *)dest; int *isrc = (int *)src; size /= sizeof(int); for (i=0;i= sizeof(int) && !(s%sizeof(int)) && !(d%sizeof(int)) && !(size%sizeof(int))) { /* do it all as words */ int *idest = (int *)dest; int *isrc = (int *)src; size /= sizeof(int); for (i=size-1;i>=0;i--) idest[i] = isrc[i]; } else { /* simplest */ char *cdest = (char *)dest; char *csrc = (char *)src; for (i=size-1;i>=0;i--) cdest[i] = csrc[i]; } } return(dest); } #endif /* HAVE_MEMMOVE */ #ifndef HAVE_STRDUP /**************************************************************************** duplicate a string ****************************************************************************/ char *rep_strdup(const char *s) { size_t len; char *ret; if (!s) return(NULL); len = strlen(s)+1; ret = (char *)malloc(len); if (!ret) return(NULL); memcpy(ret,s,len); return(ret); } #endif /* HAVE_STRDUP */ #ifndef HAVE_SETLINEBUF void rep_setlinebuf(FILE *stream) { setvbuf(stream, (char *)NULL, _IOLBF, 0); } #endif /* HAVE_SETLINEBUF */ #ifndef HAVE_VSYSLOG #ifdef HAVE_SYSLOG void rep_vsyslog (int facility_priority, const char *format, va_list arglist) { char *msg = NULL; vasprintf(&msg, format, arglist); if (!msg) return; syslog(facility_priority, "%s", msg); free(msg); } #endif /* HAVE_SYSLOG */ #endif /* HAVE_VSYSLOG */ #ifndef HAVE_STRNLEN /** Some platforms don't have strnlen **/ size_t rep_strnlen(const char *s, size_t max) { size_t len; for (len = 0; len < max; len++) { if (s[len] == '\0') { break; } } return len; } #endif #ifndef HAVE_STRNDUP /** Some platforms don't have strndup. 
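 The replacement copies at most n bytes and always NUL-terminates;
 e.g. (illustrative call) rep_strndup("interface", 5) yields a freshly
 malloc'ed "inter".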
**/ char *rep_strndup(const char *s, size_t n) { char *ret; n = strnlen(s, n); ret = malloc(n+1); if (!ret) return NULL; memcpy(ret, s, n); ret[n] = 0; return ret; } #endif #if !defined(HAVE_WAITPID) && defined(HAVE_WAIT4) int rep_waitpid(pid_t pid,int *status,int options) { return wait4(pid, status, options, NULL); } #endif #ifndef HAVE_SETEUID int rep_seteuid(uid_t euid) { #ifdef HAVE_SETRESUID return setresuid(-1, euid, -1); #else errno = ENOSYS; return -1; #endif } #endif #ifndef HAVE_SETEGID int rep_setegid(gid_t egid) { #ifdef HAVE_SETRESGID return setresgid(-1, egid, -1); #else errno = ENOSYS; return -1; #endif } #endif /******************************************************************* os/2 also doesn't have chroot ********************************************************************/ #ifndef HAVE_CHROOT int rep_chroot(const char *dname) { errno = ENOSYS; return -1; } #endif /***************************************************************** Possibly replace mkstemp if it is broken. *****************************************************************/ #ifndef HAVE_SECURE_MKSTEMP int rep_mkstemp(char *template) { /* have a reasonable go at emulating it. Hope that the system mktemp() isn't completely hopeless */ mktemp(template); if (template[0] == 0) return -1; return open(template, O_CREAT|O_EXCL|O_RDWR, 0600); } #endif #ifndef HAVE_MKDTEMP char *rep_mkdtemp(char *template) { char *dname; if ((dname = mktemp(template))) { if (mkdir(dname, 0700) >= 0) { return dname; } } return NULL; } #endif /***************************************************************** Watch out: this is not thread safe. *****************************************************************/ #ifndef HAVE_PREAD ssize_t rep_pread(int __fd, void *__buf, size_t __nbytes, off_t __offset) { if (lseek(__fd, __offset, SEEK_SET) != __offset) { return -1; } return read(__fd, __buf, __nbytes); } #endif /***************************************************************** Watch out: this is not thread safe. 
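 The pwrite emulation below is lseek()+write(), so it moves the file
 descriptor's shared offset; a concurrent reader or writer on the same
 fd will race with it.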
*****************************************************************/ #ifndef HAVE_PWRITE ssize_t rep_pwrite(int __fd, const void *__buf, size_t __nbytes, off_t __offset) { if (lseek(__fd, __offset, SEEK_SET) != __offset) { return -1; } return write(__fd, __buf, __nbytes); } #endif #ifndef HAVE_STRCASESTR char *rep_strcasestr(const char *haystack, const char *needle) { const char *s; size_t nlen = strlen(needle); for (s=haystack;*s;s++) { if (toupper(*needle) == toupper(*s) && strncasecmp(s, needle, nlen) == 0) { return (char *)((uintptr_t)s); } } return NULL; } #endif #ifndef HAVE_STRSEP char *rep_strsep(char **pps, const char *delim) { char *ret = *pps; char *p = *pps; if (p == NULL) { return NULL; } p += strcspn(p, delim); if (*p == '\0') { *pps = NULL; } else { *p = '\0'; *pps = p + 1; } return ret; } #endif #ifndef HAVE_STRTOK_R /* based on GLIBC version, copyright Free Software Foundation */ char *rep_strtok_r(char *s, const char *delim, char **save_ptr) { char *token; if (s == NULL) s = *save_ptr; s += strspn(s, delim); if (*s == '\0') { *save_ptr = s; return NULL; } token = s; s = strpbrk(token, delim); if (s == NULL) { *save_ptr = token + strlen(token); } else { *s = '\0'; *save_ptr = s + 1; } return token; } #endif #ifndef HAVE_STRTOLL long long int rep_strtoll(const char *str, char **endptr, int base) { #ifdef HAVE_STRTOQ return strtoq(str, endptr, base); #elif defined(HAVE___STRTOLL) return __strtoll(str, endptr, base); #elif SIZEOF_LONG == SIZEOF_LONG_LONG return (long long int) strtol(str, endptr, base); #else # error "You need a strtoll function" #endif } #else #ifdef HAVE_BSD_STRTOLL #undef strtoll long long int rep_strtoll(const char *str, char **endptr, int base) { int saved_errno = errno; long long int nb = strtoll(str, endptr, base); /* With glibc EINVAL is only returned if base is not ok */ if (errno == EINVAL) { if (base == 0 || (base >1 && base <37)) { /* Base was ok so it's because we were not * able to make the convertion. * Let's reset errno. */ errno = saved_errno; } } return nb; } #endif /* HAVE_BSD_STRTOLL */ #endif /* HAVE_STRTOLL */ #ifndef HAVE_STRTOULL unsigned long long int rep_strtoull(const char *str, char **endptr, int base) { #ifdef HAVE_STRTOUQ return strtouq(str, endptr, base); #elif defined(HAVE___STRTOULL) return __strtoull(str, endptr, base); #elif SIZEOF_LONG == SIZEOF_LONG_LONG return (unsigned long long int) strtoul(str, endptr, base); #else # error "You need a strtoull function" #endif } #else #ifdef HAVE_BSD_STRTOLL #undef strtoull unsigned long long int rep_strtoull(const char *str, char **endptr, int base) { int saved_errno = errno; unsigned long long int nb = strtoull(str, endptr, base); /* With glibc EINVAL is only returned if base is not ok */ if (errno == EINVAL) { if (base == 0 || (base >1 && base <37)) { /* Base was ok so it's because we were not * able to make the convertion. * Let's reset errno. 
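	 * (e.g. with a BSD libc, strtoull("junk", NULL, 10) performs no
	 * conversion and sets EINVAL although base 10 is fine; glibc
	 * would leave errno untouched there, so restoring the saved
	 * value gives us the glibc behaviour.)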
*/ errno = saved_errno; } } return nb; } #endif /* HAVE_BSD_STRTOLL */ #endif /* HAVE_STRTOULL */ #ifndef HAVE_SETENV int rep_setenv(const char *name, const char *value, int overwrite) { char *p; size_t l1, l2; int ret; if (!overwrite && getenv(name)) { return 0; } l1 = strlen(name); l2 = strlen(value); p = malloc(l1+l2+2); if (p == NULL) { return -1; } memcpy(p, name, l1); p[l1] = '='; memcpy(p+l1+1, value, l2); p[l1+l2+1] = 0; ret = putenv(p); if (ret != 0) { free(p); } return ret; } #endif #ifndef HAVE_UNSETENV int rep_unsetenv(const char *name) { extern char **environ; size_t len = strlen(name); size_t i, count; if (environ == NULL || getenv(name) == NULL) { return 0; } for (i=0;environ[i];i++) /* noop */ ; count=i; for (i=0;i= needlelen) { char *p = (char *)memchr(haystack, *(const char *)needle, haystacklen-(needlelen-1)); if (!p) return NULL; if (memcmp(p, needle, needlelen) == 0) { return p; } haystack = p+1; haystacklen -= (p - (const char *)haystack) + 1; } return NULL; } #endif #if !defined(HAVE_VDPRINTF) || !defined(HAVE_C99_VSNPRINTF) int rep_vdprintf(int fd, const char *format, va_list ap) { char *s = NULL; int ret; vasprintf(&s, format, ap); if (s == NULL) { errno = ENOMEM; return -1; } ret = write(fd, s, strlen(s)); free(s); return ret; } #endif #if !defined(HAVE_DPRINTF) || !defined(HAVE_C99_VSNPRINTF) int rep_dprintf(int fd, const char *format, ...) { int ret; va_list ap; va_start(ap, format); ret = vdprintf(fd, format, ap); va_end(ap); return ret; } #endif #ifndef HAVE_GET_CURRENT_DIR_NAME char *rep_get_current_dir_name(void) { char buf[PATH_MAX+1]; char *p; p = getcwd(buf, sizeof(buf)); if (p == NULL) { return NULL; } return strdup(p); } #endif #ifndef HAVE_STRERROR_R int rep_strerror_r(int errnum, char *buf, size_t buflen) { char *s = strerror(errnum); if (strlen(s)+1 > buflen) { errno = ERANGE; return -1; } strncpy(buf, s, buflen); return 0; } #elif (!defined(STRERROR_R_XSI_NOT_GNU)) #undef strerror_r int rep_strerror_r(int errnum, char *buf, size_t buflen) { char *s = strerror_r(errnum, buf, buflen); if (s == NULL) { /* Shouldn't happen, should always get a string */ return EINVAL; } if (s != buf) { strlcpy(buf, s, buflen); if (strlen(s) > buflen - 1) { return ERANGE; } } return 0; } #endif #ifndef HAVE_CLOCK_GETTIME int rep_clock_gettime(clockid_t clk_id, struct timespec *tp) { struct timeval tval; switch (clk_id) { case 0: /* CLOCK_REALTIME :*/ #if defined(HAVE_GETTIMEOFDAY_TZ) || defined(HAVE_GETTIMEOFDAY_TZ_VOID) gettimeofday(&tval,NULL); #else gettimeofday(&tval); #endif tp->tv_sec = tval.tv_sec; tp->tv_nsec = tval.tv_usec * 1000; break; default: errno = EINVAL; return -1; } return 0; } #endif #ifndef HAVE_MEMALIGN void *rep_memalign( size_t align, size_t size ) { #if defined(HAVE_POSIX_MEMALIGN) void *p = NULL; int ret = posix_memalign( &p, align, size ); if ( ret == 0 ) return p; return NULL; #else /* On *BSD systems memaligns doesn't exist, but memory will * be aligned on allocations of > pagesize. 
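	 * Hence the fallback below looks up the system page size and
	 * rounds small requests up to one full page before handing them
	 * to malloc(). */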
*/ #if defined(SYSCONF_SC_PAGESIZE) size_t pagesize = (size_t)sysconf(_SC_PAGESIZE); #elif defined(HAVE_GETPAGESIZE) size_t pagesize = (size_t)getpagesize(); #else size_t pagesize = (size_t)-1; #endif if (pagesize == (size_t)-1) { errno = ENOSYS; return NULL; } if (size < pagesize) { size = pagesize; } return malloc(size); #endif } #endif #ifndef HAVE_GETPEEREID int rep_getpeereid(int s, uid_t *uid, gid_t *gid) { #if defined(HAVE_PEERCRED) struct ucred cred; socklen_t cred_len = sizeof(struct ucred); int ret; #undef getsockopt ret = getsockopt(s, SOL_SOCKET, SO_PEERCRED, (void *)&cred, &cred_len); if (ret != 0) { return -1; } if (cred_len != sizeof(struct ucred)) { errno = EINVAL; return -1; } *uid = cred.uid; *gid = cred.gid; return 0; #else errno = ENOSYS; return -1; #endif } #endif #ifndef HAVE_USLEEP int rep_usleep(useconds_t sec) { struct timeval tval; /* * Fake it with select... */ tval.tv_sec = 0; tval.tv_usec = usecs/1000; select(0,NULL,NULL,NULL,&tval); return 0; } #endif /* HAVE_USLEEP */ #ifndef HAVE_SETPROCTITLE void rep_setproctitle(const char *fmt, ...) { } #endif #ifndef HAVE_SETPROCTITLE_INIT void rep_setproctitle_init(int argc, char *argv[], char *envp[]) { } #endif #ifndef HAVE_MEMSET_S # ifndef RSIZE_MAX # define RSIZE_MAX (SIZE_MAX >> 1) # endif int rep_memset_s(void *dest, size_t destsz, int ch, size_t count) { if (dest == NULL) { return EINVAL; } if (destsz > RSIZE_MAX || count > RSIZE_MAX || count > destsz) { return ERANGE; } #if defined(HAVE_MEMSET_EXPLICIT) memset_explicit(dest, destsz, ch, count); #else /* HAVE_MEMSET_EXPLICIT */ memset(dest, ch, count); # if defined(HAVE_GCC_VOLATILE_MEMORY_PROTECTION) /* See http://llvm.org/bugs/show_bug.cgi?id=15495 */ __asm__ volatile("" : : "g"(dest) : "memory"); # endif /* HAVE_GCC_VOLATILE_MEMORY_PROTECTION */ #endif /* HAVE_MEMSET_EXPLICIT */ return 0; } #endif /* HAVE_MEMSET_S */ #ifndef HAVE_GETPROGNAME # ifndef HAVE_PROGRAM_INVOCATION_SHORT_NAME # define PROGNAME_SIZE 32 static char rep_progname[PROGNAME_SIZE]; # endif /* HAVE_PROGRAM_INVOCATION_SHORT_NAME */ const char *rep_getprogname(void) { #ifdef HAVE_PROGRAM_INVOCATION_SHORT_NAME return program_invocation_short_name; #else /* HAVE_PROGRAM_INVOCATION_SHORT_NAME */ FILE *fp = NULL; char cmdline[4096] = {0}; char *p = NULL; pid_t pid; size_t nread; int len; int rc; if (rep_progname[0] != '\0') { return rep_progname; } len = snprintf(rep_progname, sizeof(rep_progname), "%s", ""); if (len <= 0) { return NULL; } pid = getpid(); if (pid <= 1 || pid == (pid_t)-1) { return NULL; } len = snprintf(cmdline, sizeof(cmdline), "/proc/%u/cmdline", (unsigned int)pid); if (len <= 0 || len == sizeof(cmdline)) { return NULL; } fp = fopen(cmdline, "r"); if (fp == NULL) { return NULL; } nread = fread(cmdline, 1, sizeof(cmdline) - 1, fp); rc = fclose(fp); if (rc != 0) { return NULL; } if (nread == 0) { return NULL; } cmdline[nread] = '\0'; p = strrchr(cmdline, '/'); if (p != NULL) { p++; } else { p = cmdline; } len = strlen(p); if (len > PROGNAME_SIZE) { p[PROGNAME_SIZE - 1] = '\0'; } (void)snprintf(rep_progname, sizeof(rep_progname), "%s", p); return rep_progname; #endif /* HAVE_PROGRAM_INVOCATION_SHORT_NAME */ } #endif /* HAVE_GETPROGNAME */ tdb-1.4.2/lib/replace/replace.h0000660000000000000000000005156613526763114016215 0ustar rootroot00000000000000/* Unix SMB/CIFS implementation. macros to go along with the lib/replace/ portability layer code Copyright (C) Andrew Tridgell 2005 Copyright (C) Jelmer Vernooij 2006-2008 Copyright (C) Jeremy Allison 2007. ** NOTE! 
The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifndef _LIBREPLACE_REPLACE_H #define _LIBREPLACE_REPLACE_H #ifndef NO_CONFIG_H #include "config.h" #endif #ifdef HAVE_STANDARDS_H #include #endif /* * Needs to be defined before std*.h and string*.h are included * As it's also needed when Python.h is the first header we * require a global -D__STDC_WANT_LIB_EXT1__=1 */ #ifndef __STDC_WANT_LIB_EXT1__ #error -D__STDC_WANT_LIB_EXT1__=1 required #endif #include #include #include #include #ifndef HAVE_DECL_EWOULDBLOCK #define EWOULDBLOCK EAGAIN #endif #if defined(_MSC_VER) || defined(__MINGW32__) #include "win32_replace.h" #endif #ifdef HAVE_INTTYPES_H #define __STDC_FORMAT_MACROS #include #elif defined(HAVE_STDINT_H) #include /* force off HAVE_INTTYPES_H so that roken doesn't try to include both, which causes a warning storm on irix */ #undef HAVE_INTTYPES_H #endif #ifdef HAVE_MALLOC_H #include #endif #ifndef __PRI64_PREFIX # if __WORDSIZE == 64 && ! defined __APPLE__ # define __PRI64_PREFIX "l" # else # define __PRI64_PREFIX "ll" # endif #endif /* Decimal notation. */ #ifndef PRId8 # define PRId8 "d" #endif #ifndef PRId16 # define PRId16 "d" #endif #ifndef PRId32 # define PRId32 "d" #endif #ifndef PRId64 # define PRId64 __PRI64_PREFIX "d" #endif #ifndef PRIi8 # define PRIi8 "i" #endif #ifndef PRIi16 # define PRIi16 "i" #endif #ifndef PRIi32 # define PRIi32 "i" #endif #ifndef PRIi64 # define PRIi64 __PRI64_PREFIX "i" #endif #ifndef PRIu8 # define PRIu8 "u" #endif #ifndef PRIu16 # define PRIu16 "u" #endif #ifndef PRIu32 # define PRIu32 "u" #endif #ifndef PRIu64 # define PRIu64 __PRI64_PREFIX "u" #endif #ifndef SCNd8 # define SCNd8 "hhd" #endif #ifndef SCNd16 # define SCNd16 "hd" #endif #ifndef SCNd32 # define SCNd32 "d" #endif #ifndef SCNd64 # define SCNd64 __PRI64_PREFIX "d" #endif #ifndef SCNi8 # define SCNi8 "hhi" #endif #ifndef SCNi16 # define SCNi16 "hi" #endif #ifndef SCNi32 # define SCNi32 "i" #endif #ifndef SCNi64 # define SCNi64 __PRI64_PREFIX "i" #endif #ifndef SCNu8 # define SCNu8 "hhu" #endif #ifndef SCNu16 # define SCNu16 "hu" #endif #ifndef SCNu32 # define SCNu32 "u" #endif #ifndef SCNu64 # define SCNu64 __PRI64_PREFIX "u" #endif #ifdef HAVE_BSD_STRING_H #include #endif #ifdef HAVE_BSD_UNISTD_H #include #endif #ifdef HAVE_UNISTD_H #include #endif #ifdef HAVE_STRING_H #include #endif #ifdef HAVE_STRINGS_H #include #endif #ifdef HAVE_SYS_TYPES_H #include #endif #ifdef HAVE_SYS_SYSMACROS_H #include #endif #ifdef HAVE_SETPROCTITLE_H #include #endif #if STDC_HEADERS #include #include #endif #ifdef HAVE_LINUX_TYPES_H /* * This is needed as some broken header files require this to be included early */ #include #endif #ifndef HAVE_STRERROR extern char *sys_errlist[]; #define strerror(i) sys_errlist[i] #endif #ifndef HAVE_ERRNO_DECL extern int errno; #endif #ifndef HAVE_STRDUP #define strdup rep_strdup char 
*rep_strdup(const char *s); #endif #ifndef HAVE_MEMMOVE #define memmove rep_memmove void *rep_memmove(void *dest,const void *src,int size); #endif #ifndef HAVE_MEMMEM #define memmem rep_memmem void *rep_memmem(const void *haystack, size_t haystacklen, const void *needle, size_t needlelen); #endif #ifndef HAVE_MEMALIGN #define memalign rep_memalign void *rep_memalign(size_t boundary, size_t size); #endif #ifndef HAVE_MKTIME #define mktime rep_mktime /* prototype is in "system/time.h" */ #endif #ifndef HAVE_TIMEGM #define timegm rep_timegm /* prototype is in "system/time.h" */ #endif #ifndef HAVE_UTIME #define utime rep_utime /* prototype is in "system/time.h" */ #endif #ifndef HAVE_UTIMES #define utimes rep_utimes /* prototype is in "system/time.h" */ #endif #ifndef HAVE_STRLCPY #define strlcpy rep_strlcpy size_t rep_strlcpy(char *d, const char *s, size_t bufsize); #endif #ifndef HAVE_STRLCAT #define strlcat rep_strlcat size_t rep_strlcat(char *d, const char *s, size_t bufsize); #endif #ifndef HAVE_CLOSEFROM #define closefrom rep_closefrom int rep_closefrom(int lower); #endif #if (defined(BROKEN_STRNDUP) || !defined(HAVE_STRNDUP)) #undef HAVE_STRNDUP #define strndup rep_strndup char *rep_strndup(const char *s, size_t n); #endif #if (defined(BROKEN_STRNLEN) || !defined(HAVE_STRNLEN)) #undef HAVE_STRNLEN #define strnlen rep_strnlen size_t rep_strnlen(const char *s, size_t n); #endif #if !defined(HAVE_DECL_ENVIRON) # ifdef __APPLE__ # include # define environ (*_NSGetEnviron()) # else /* __APPLE__ */ extern char **environ; # endif /* __APPLE */ #endif /* !defined(HAVE_DECL_ENVIRON) */ #ifndef HAVE_SETENV #define setenv rep_setenv int rep_setenv(const char *name, const char *value, int overwrite); #else #ifndef HAVE_SETENV_DECL int setenv(const char *name, const char *value, int overwrite); #endif #endif #ifndef HAVE_UNSETENV #define unsetenv rep_unsetenv int rep_unsetenv(const char *name); #endif #ifndef HAVE_SETEUID #define seteuid rep_seteuid int rep_seteuid(uid_t); #endif #ifndef HAVE_SETEGID #define setegid rep_setegid int rep_setegid(gid_t); #endif #if (defined(USE_SETRESUID) && !defined(HAVE_SETRESUID_DECL)) /* stupid glibc */ int setresuid(uid_t ruid, uid_t euid, uid_t suid); #endif #if (defined(USE_SETRESUID) && !defined(HAVE_SETRESGID_DECL)) int setresgid(gid_t rgid, gid_t egid, gid_t sgid); #endif #ifndef HAVE_CHOWN #define chown rep_chown int rep_chown(const char *path, uid_t uid, gid_t gid); #endif #ifndef HAVE_CHROOT #define chroot rep_chroot int rep_chroot(const char *dirname); #endif #ifndef HAVE_LINK #define link rep_link int rep_link(const char *oldpath, const char *newpath); #endif #ifndef HAVE_READLINK #define readlink rep_readlink ssize_t rep_readlink(const char *path, char *buf, size_t bufsize); #endif #ifndef HAVE_SYMLINK #define symlink rep_symlink int rep_symlink(const char *oldpath, const char *newpath); #endif #ifndef HAVE_REALPATH #define realpath rep_realpath char *rep_realpath(const char *path, char *resolved_path); #endif #ifndef HAVE_LCHOWN #define lchown rep_lchown int rep_lchown(const char *fname,uid_t uid,gid_t gid); #endif #ifdef HAVE_UNIX_H #include #endif #ifndef HAVE_SETLINEBUF #define setlinebuf rep_setlinebuf void rep_setlinebuf(FILE *); #endif #ifndef HAVE_STRCASESTR #define strcasestr rep_strcasestr char *rep_strcasestr(const char *haystack, const char *needle); #endif #ifndef HAVE_STRSEP #define strsep rep_strsep char *rep_strsep(char **pps, const char *delim); #endif #ifndef HAVE_STRTOK_R #define strtok_r rep_strtok_r char *rep_strtok_r(char *s, 
const char *delim, char **save_ptr); #endif #ifndef HAVE_STRTOLL #define strtoll rep_strtoll long long int rep_strtoll(const char *str, char **endptr, int base); #else #ifdef HAVE_BSD_STRTOLL #define strtoll rep_strtoll long long int rep_strtoll(const char *str, char **endptr, int base); #endif #endif #ifndef HAVE_STRTOULL #define strtoull rep_strtoull unsigned long long int rep_strtoull(const char *str, char **endptr, int base); #else #ifdef HAVE_BSD_STRTOLL /* yes, it's not HAVE_BSD_STRTOULL */ #define strtoull rep_strtoull unsigned long long int rep_strtoull(const char *str, char **endptr, int base); #endif #endif #ifndef HAVE_FTRUNCATE #define ftruncate rep_ftruncate int rep_ftruncate(int,off_t); #endif #ifndef HAVE_INITGROUPS #define initgroups rep_initgroups int rep_initgroups(char *name, gid_t id); #endif #if !defined(HAVE_BZERO) && defined(HAVE_MEMSET) #define bzero(a,b) memset((a),'\0',(b)) #endif #ifndef HAVE_DLERROR #define dlerror rep_dlerror char *rep_dlerror(void); #endif #ifndef HAVE_DLOPEN #define dlopen rep_dlopen #ifdef DLOPEN_TAKES_UNSIGNED_FLAGS void *rep_dlopen(const char *name, unsigned int flags); #else void *rep_dlopen(const char *name, int flags); #endif #endif #ifndef HAVE_DLSYM #define dlsym rep_dlsym void *rep_dlsym(void *handle, const char *symbol); #endif #ifndef HAVE_DLCLOSE #define dlclose rep_dlclose int rep_dlclose(void *handle); #endif #ifndef HAVE_SOCKETPAIR #define socketpair rep_socketpair /* prototype is in system/network.h */ #endif #ifndef PRINTF_ATTRIBUTE #ifdef HAVE___ATTRIBUTE__ /** Use gcc attribute to check printf fns. a1 is the 1-based index of * the parameter containing the format, and a2 the index of the first * argument. Note that some gcc 2.x versions don't handle this * properly **/ #define PRINTF_ATTRIBUTE(a1, a2) __attribute__ ((format (__printf__, a1, a2))) #else #define PRINTF_ATTRIBUTE(a1, a2) #endif #endif #ifndef _DEPRECATED_ #ifdef HAVE___ATTRIBUTE__ #define _DEPRECATED_ __attribute__ ((deprecated)) #else #define _DEPRECATED_ #endif #endif #if !defined(HAVE_VDPRINTF) || !defined(HAVE_C99_VSNPRINTF) #define vdprintf rep_vdprintf int rep_vdprintf(int fd, const char *format, va_list ap) PRINTF_ATTRIBUTE(2,0); #endif #if !defined(HAVE_DPRINTF) || !defined(HAVE_C99_VSNPRINTF) #define dprintf rep_dprintf int rep_dprintf(int fd, const char *format, ...) PRINTF_ATTRIBUTE(2,3); #endif #if !defined(HAVE_VASPRINTF) || !defined(HAVE_C99_VSNPRINTF) #define vasprintf rep_vasprintf int rep_vasprintf(char **ptr, const char *format, va_list ap) PRINTF_ATTRIBUTE(2,0); #endif #if !defined(HAVE_SNPRINTF) || !defined(HAVE_C99_VSNPRINTF) #define snprintf rep_snprintf int rep_snprintf(char *,size_t ,const char *, ...) PRINTF_ATTRIBUTE(3,4); #endif #if !defined(HAVE_VSNPRINTF) || !defined(HAVE_C99_VSNPRINTF) #define vsnprintf rep_vsnprintf int rep_vsnprintf(char *,size_t ,const char *, va_list ap) PRINTF_ATTRIBUTE(3,0); #endif #if !defined(HAVE_ASPRINTF) || !defined(HAVE_C99_VSNPRINTF) #define asprintf rep_asprintf int rep_asprintf(char **,const char *, ...) PRINTF_ATTRIBUTE(2,3); #endif #if !defined(HAVE_C99_VSNPRINTF) #ifdef REPLACE_BROKEN_PRINTF /* * We do not redefine printf by default * as it breaks the build if system headers * use __attribute__((format(printf, 3, 0))) * instead of __attribute__((format(__printf__, 3, 0))) */ #define printf rep_printf #endif int rep_printf(const char *, ...) PRINTF_ATTRIBUTE(1,2); #endif #if !defined(HAVE_C99_VSNPRINTF) #define fprintf rep_fprintf int rep_fprintf(FILE *stream, const char *, ...) 
PRINTF_ATTRIBUTE(2,3); #endif #ifndef HAVE_VSYSLOG #ifdef HAVE_SYSLOG #define vsyslog rep_vsyslog void rep_vsyslog (int facility_priority, const char *format, va_list arglist) PRINTF_ATTRIBUTE(2,0); #endif #endif /* we used to use these fns, but now we have good replacements for snprintf and vsnprintf */ #define slprintf snprintf #ifndef HAVE_VA_COPY #undef va_copy #ifdef HAVE___VA_COPY #define va_copy(dest, src) __va_copy(dest, src) #else #define va_copy(dest, src) (dest) = (src) #endif #endif #ifndef HAVE_VOLATILE #define volatile #endif #ifndef HAVE_COMPARISON_FN_T typedef int (*comparison_fn_t)(const void *, const void *); #endif #ifndef HAVE_WORKING_STRPTIME #define strptime rep_strptime struct tm; char *rep_strptime(const char *buf, const char *format, struct tm *tm); #endif #ifndef HAVE_DUP2 #define dup2 rep_dup2 int rep_dup2(int oldfd, int newfd); #endif /* Load header file for dynamic linking stuff */ #ifdef HAVE_DLFCN_H #include #endif #ifndef RTLD_LAZY #define RTLD_LAZY 0 #endif #ifndef RTLD_NOW #define RTLD_NOW 0 #endif #ifndef RTLD_GLOBAL #define RTLD_GLOBAL 0 #endif #ifndef HAVE_SECURE_MKSTEMP #define mkstemp(path) rep_mkstemp(path) int rep_mkstemp(char *temp); #endif #ifndef HAVE_MKDTEMP #define mkdtemp rep_mkdtemp char *rep_mkdtemp(char *template); #endif #ifndef HAVE_PREAD #define pread rep_pread ssize_t rep_pread(int __fd, void *__buf, size_t __nbytes, off_t __offset); #define LIBREPLACE_PREAD_REPLACED 1 #else #define LIBREPLACE_PREAD_NOT_REPLACED 1 #endif #ifndef HAVE_PWRITE #define pwrite rep_pwrite ssize_t rep_pwrite(int __fd, const void *__buf, size_t __nbytes, off_t __offset); #define LIBREPLACE_PWRITE_REPLACED 1 #else #define LIBREPLACE_PWRITE_NOT_REPLACED 1 #endif #if !defined(HAVE_INET_NTOA) || defined(REPLACE_INET_NTOA) #define inet_ntoa rep_inet_ntoa /* prototype is in "system/network.h" */ #endif #ifndef HAVE_INET_PTON #define inet_pton rep_inet_pton /* prototype is in "system/network.h" */ #endif #ifndef HAVE_INET_NTOP #define inet_ntop rep_inet_ntop /* prototype is in "system/network.h" */ #endif #ifndef HAVE_INET_ATON #define inet_aton rep_inet_aton /* prototype is in "system/network.h" */ #endif #ifndef HAVE_CONNECT #define connect rep_connect /* prototype is in "system/network.h" */ #endif #ifndef HAVE_GETHOSTBYNAME #define gethostbyname rep_gethostbyname /* prototype is in "system/network.h" */ #endif #ifndef HAVE_GETIFADDRS #define getifaddrs rep_getifaddrs /* prototype is in "system/network.h" */ #endif #ifndef HAVE_FREEIFADDRS #define freeifaddrs rep_freeifaddrs /* prototype is in "system/network.h" */ #endif #ifndef HAVE_GET_CURRENT_DIR_NAME #define get_current_dir_name rep_get_current_dir_name char *rep_get_current_dir_name(void); #endif #if (!defined(HAVE_STRERROR_R) || !defined(STRERROR_R_XSI_NOT_GNU)) #define strerror_r rep_strerror_r int rep_strerror_r(int errnum, char *buf, size_t buflen); #endif #if !defined(HAVE_CLOCK_GETTIME) #define clock_gettime rep_clock_gettime #endif #ifdef HAVE_LIMITS_H #include #endif #ifdef HAVE_SYS_PARAM_H #include #endif /* The extra casts work around common compiler bugs. */ #define _TYPE_SIGNED(t) (! ((t) 0 < (t) -1)) /* The outer cast is needed to work around a bug in Cray C 5.0.3.0. It is necessary at least when t == time_t. */ #define _TYPE_MINIMUM(t) ((t) (_TYPE_SIGNED (t) \ ? 
~ (t) 0 << (sizeof (t) * CHAR_BIT - 1) : (t) 0)) #define _TYPE_MAXIMUM(t) ((t) (~ (t) 0 - _TYPE_MINIMUM (t))) #ifndef UINT16_MAX #define UINT16_MAX 65535 #endif #ifndef UINT32_MAX #define UINT32_MAX (4294967295U) #endif #ifndef UINT64_MAX #define UINT64_MAX ((uint64_t)-1) #endif #ifndef INT64_MAX #define INT64_MAX 9223372036854775807LL #endif #ifndef CHAR_BIT #define CHAR_BIT 8 #endif #ifndef INT32_MAX #define INT32_MAX _TYPE_MAXIMUM(int32_t) #endif #ifdef HAVE_STDBOOL_H #include #endif #if !defined(HAVE_BOOL) #ifdef HAVE__Bool #define bool _Bool #else typedef int bool; #endif #endif #if !defined(HAVE_INTPTR_T) typedef long long intptr_t ; #define __intptr_t_defined #endif #if !defined(HAVE_UINTPTR_T) typedef unsigned long long uintptr_t ; #define __uintptr_t_defined #endif #if !defined(HAVE_PTRDIFF_T) typedef unsigned long long ptrdiff_t ; #endif /* * to prevent from doing a redefine of 'bool' * * IRIX, HPUX, MacOS 10 and Solaris need BOOL_DEFINED * Tru64 needs _BOOL_EXISTS * AIX needs _BOOL,_TRUE,_FALSE */ #ifndef BOOL_DEFINED #define BOOL_DEFINED #endif #ifndef _BOOL_EXISTS #define _BOOL_EXISTS #endif #ifndef _BOOL #define _BOOL #endif #ifndef __bool_true_false_are_defined #define __bool_true_false_are_defined #endif #ifndef true #define true (1) #endif #ifndef false #define false (0) #endif #ifndef _TRUE #define _TRUE true #endif #ifndef _FALSE #define _FALSE false #endif #ifndef HAVE_FUNCTION_MACRO #ifdef HAVE_func_MACRO #define __FUNCTION__ __func__ #else #define __FUNCTION__ ("") #endif #endif #ifndef MIN #define MIN(a,b) ((a)<(b)?(a):(b)) #endif #ifndef MAX #define MAX(a,b) ((a)>(b)?(a):(b)) #endif #if !defined(HAVE_VOLATILE) #define volatile #endif /** this is a warning hack. The idea is to use this everywhere that we get the "discarding const" warning from gcc. That doesn't actually fix the problem of course, but it means that when we do get to cleaning them up we can do it by searching the code for discard_const. It also means that other error types aren't as swamped by the noise of hundreds of const warnings, so we are more likely to notice when we get new errors. Please only add more uses of this macro when you find it _really_ hard to fix const warnings. Our aim is to eventually use this function in only a very few places. Also, please call this via the discard_const_p() macro interface, as that makes the return type safe. */ #define discard_const(ptr) ((void *)((uintptr_t)(ptr))) /** Type-safe version of discard_const */ #define discard_const_p(type, ptr) ((type *)discard_const(ptr)) #ifndef __STRING #define __STRING(x) #x #endif #ifndef __STRINGSTRING #define __STRINGSTRING(x) __STRING(x) #endif #ifndef __LINESTR__ #define __LINESTR__ __STRINGSTRING(__LINE__) #endif #ifndef __location__ #define __location__ __FILE__ ":" __LINESTR__ #endif /** * Zero a structure. */ #define ZERO_STRUCT(x) memset_s((char *)&(x), sizeof(x), 0, sizeof(x)) /** * Zero a structure given a pointer to the structure. */ #define ZERO_STRUCTP(x) do { \ if ((x) != NULL) { \ memset_s((char *)(x), sizeof(*(x)), 0, sizeof(*(x))); \ } \ } while(0) /** * Zero a structure given a pointer to the structure - no zero check */ #define ZERO_STRUCTPN(x) memset_s((char *)(x), sizeof(*(x)), 0, sizeof(*(x))) /** * Zero an array - note that sizeof(array) must work - ie. 
it must not be a * pointer */ #define ZERO_ARRAY(x) memset_s((char *)(x), sizeof(x), 0, sizeof(x)) /** * Zero a given len of an array */ #define ZERO_ARRAY_LEN(x, l) memset_s((char *)(x), (l), 0, (l)) /** * Work out how many elements there are in a static array. */ #define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0])) /** * Pointer difference macro */ #define PTR_DIFF(p1,p2) ((ptrdiff_t)(((const char *)(p1)) - (const char *)(p2))) #ifdef __COMPAR_FN_T #define QSORT_CAST (__compar_fn_t) #endif #ifndef QSORT_CAST #define QSORT_CAST (int (*)(const void *, const void *)) #endif #ifndef PATH_MAX #define PATH_MAX 1024 #endif #ifndef MAX_DNS_NAME_LENGTH #define MAX_DNS_NAME_LENGTH 256 /* Actually 255 but +1 for terminating null. */ #endif #ifndef HAVE_CRYPT char *ufc_crypt(const char *key, const char *salt); #define crypt ufc_crypt #else #ifdef HAVE_CRYPT_H #include #endif #endif /* these macros gain us a few percent of speed on gcc */ #if (__GNUC__ >= 3) /* the strange !! is to ensure that __builtin_expect() takes either 0 or 1 as its first argument */ #ifndef likely #define likely(x) __builtin_expect(!!(x), 1) #endif #ifndef unlikely #define unlikely(x) __builtin_expect(!!(x), 0) #endif #else #ifndef likely #define likely(x) (x) #endif #ifndef unlikely #define unlikely(x) (x) #endif #endif #ifndef HAVE_FDATASYNC #define fdatasync(fd) fsync(fd) #elif !defined(HAVE_DECL_FDATASYNC) int fdatasync(int ); #endif /* these are used to mark symbols as local to a shared lib, or * publicly available via the shared lib API */ #ifndef _PUBLIC_ #ifdef HAVE_VISIBILITY_ATTR #define _PUBLIC_ __attribute__((visibility("default"))) #else #define _PUBLIC_ #endif #endif #ifndef _PRIVATE_ #ifdef HAVE_VISIBILITY_ATTR # define _PRIVATE_ __attribute__((visibility("hidden"))) #else # define _PRIVATE_ #endif #endif #ifndef HAVE_POLL #define poll rep_poll /* prototype is in "system/network.h" */ #endif #ifndef HAVE_GETPEEREID #define getpeereid rep_getpeereid int rep_getpeereid(int s, uid_t *uid, gid_t *gid); #endif #ifndef HAVE_USLEEP #define usleep rep_usleep typedef long useconds_t; int usleep(useconds_t); #endif #ifndef HAVE_SETPROCTITLE #define setproctitle rep_setproctitle void rep_setproctitle(const char *fmt, ...) PRINTF_ATTRIBUTE(1, 2); #endif #ifndef HAVE_SETPROCTITLE_INIT #define setproctitle_init rep_setproctitle_init void rep_setproctitle_init(int argc, char *argv[], char *envp[]); #endif #ifndef HAVE_MEMSET_S #define memset_s rep_memset_s int rep_memset_s(void *dest, size_t destsz, int ch, size_t count); #endif #ifndef HAVE_GETPROGNAME #define getprogname rep_getprogname const char *rep_getprogname(void); #endif #ifndef FALL_THROUGH # ifdef HAVE_FALLTHROUGH_ATTRIBUTE # define FALL_THROUGH __attribute__ ((fallthrough)) # else /* HAVE_FALLTHROUGH_ATTRIBUTE */ # define FALL_THROUGH ((void)0) # endif /* HAVE_FALLTHROUGH_ATTRIBUTE */ #endif /* FALL_THROUGH */ bool nss_wrapper_enabled(void); bool nss_wrapper_hosts_enabled(void); bool socket_wrapper_enabled(void); bool uid_wrapper_enabled(void); /* Needed for Solaris atomic_add_XX functions. */ #if defined(HAVE_SYS_ATOMIC_H) #include #endif #endif /* _LIBREPLACE_REPLACE_H */ tdb-1.4.2/lib/replace/snprintf.c0000660000000000000000000010674613444661620016437 0ustar rootroot00000000000000/* * NOTE: If you change this file, please merge it into rsync, samba, etc. 
*/ /* * Copyright Patrick Powell 1995 * This code is based on code written by Patrick Powell (papowell@astart.com) * It may be used for any purpose as long as this notice remains intact * on all source code distributions */ /************************************************************** * Original: * Patrick Powell Tue Apr 11 09:48:21 PDT 1995 * A bombproof version of doprnt (dopr) included. * Sigh. This sort of thing is always nasty do deal with. Note that * the version here does not include floating point... * * snprintf() is used instead of sprintf() as it does limit checks * for string length. This covers a nasty loophole. * * The other functions are there to prevent NULL pointers from * causing nast effects. * * More Recently: * Brandon Long 9/15/96 for mutt 0.43 * This was ugly. It is still ugly. I opted out of floating point * numbers, but the formatter understands just about everything * from the normal C string format, at least as far as I can tell from * the Solaris 2.5 printf(3S) man page. * * Brandon Long 10/22/97 for mutt 0.87.1 * Ok, added some minimal floating point support, which means this * probably requires libm on most operating systems. Don't yet * support the exponent (e,E) and sigfig (g,G). Also, fmtint() * was pretty badly broken, it just wasn't being exercised in ways * which showed it, so that's been fixed. Also, formatted the code * to mutt conventions, and removed dead code left over from the * original. Also, there is now a builtin-test, just compile with: * gcc -DTEST_SNPRINTF -o snprintf snprintf.c -lm * and run snprintf for results. * * Thomas Roessler 01/27/98 for mutt 0.89i * The PGP code was using unsigned hexadecimal formats. * Unfortunately, unsigned formats simply didn't work. * * Michael Elkins 03/05/98 for mutt 0.90.8 * The original code assumed that both snprintf() and vsnprintf() were * missing. Some systems only have snprintf() but not vsnprintf(), so * the code is now broken down under HAVE_SNPRINTF and HAVE_VSNPRINTF. * * Andrew Tridgell (tridge@samba.org) Oct 1998 * fixed handling of %.0f * added test for HAVE_LONG_DOUBLE * * tridge@samba.org, idra@samba.org, April 2001 * got rid of fcvt code (twas buggy and made testing harder) * added C99 semantics * * date: 2002/12/19 19:56:31; author: herb; state: Exp; lines: +2 -0 * actually print args for %g and %e * * date: 2002/06/03 13:37:52; author: jmcd; state: Exp; lines: +8 -0 * Since includes.h isn't included here, VA_COPY has to be defined here. I don't * see any include file that is guaranteed to be here, so I'm defining it * locally. Fixes AIX and Solaris builds. * * date: 2002/06/03 03:07:24; author: tridge; state: Exp; lines: +5 -13 * put the ifdef for HAVE_VA_COPY in one place rather than in lots of * functions * * date: 2002/05/17 14:51:22; author: jmcd; state: Exp; lines: +21 -4 * Fix usage of va_list passed as an arg. Use __va_copy before using it * when it exists. * * date: 2002/04/16 22:38:04; author: idra; state: Exp; lines: +20 -14 * Fix incorrect zpadlen handling in fmtfp. * Thanks to Ollie Oldham for spotting it. * few mods to make it easier to compile the tests. * addedd the "Ollie" test to the floating point ones. * * Martin Pool (mbp@samba.org) April 2003 * Remove NO_CONFIG_H so that the test case can be built within a source * tree with less trouble. * Remove unnecessary SAFE_FREE() definition. * * Martin Pool (mbp@samba.org) May 2003 * Put in a prototype for dummy_snprintf() to quiet compiler warnings. 
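 *
 * (Editorial aside, not an original changelog entry: VA_COPY is needed
 * because a va_list may legally be traversed only once on some ABIs.
 * The sizing-then-formatting pattern used by rep_vasprintf() later in
 * this file looks like:
 *
 *     va_list ap2;
 *     VA_COPY(ap2, ap);                      // clone before first use
 *     ret = vsnprintf(NULL, 0, format, ap2); // sizing pass
 *     va_end(ap2);
 *     // ...then allocate ret+1 bytes, VA_COPY again, format for real.
 * )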
* * Move #endif to make sure VA_COPY, LDOUBLE, etc are defined even * if the C library has some snprintf functions already. * * Darren Tucker (dtucker@zip.com.au) 2005 * Fix bug allowing read overruns of the source string with "%.*s" * Usually harmless unless the read runs outside the process' allocation * (eg if your malloc does guard pages) in which case it will segfault. * From OpenSSH. Also added test for same. * * Simo Sorce (idra@samba.org) Jan 2006 * * Add support for position independent parameters * fix fmtstr now it conforms to sprintf wrt min.max * **************************************************************/ #include "replace.h" #include "system/locale.h" #ifdef TEST_SNPRINTF /* need math library headers for testing */ /* In test mode, we pretend that this system doesn't have any snprintf * functions, regardless of what config.h says. */ # undef HAVE_SNPRINTF # undef HAVE_VSNPRINTF # undef HAVE_C99_VSNPRINTF # undef HAVE_ASPRINTF # undef HAVE_VASPRINTF # include #endif /* TEST_SNPRINTF */ #if defined(HAVE_SNPRINTF) && defined(HAVE_VSNPRINTF) && defined(HAVE_C99_VSNPRINTF) /* only include stdio.h if we are not re-defining snprintf or vsnprintf */ #include /* make the compiler happy with an empty file */ void dummy_snprintf(void); void dummy_snprintf(void) {} #endif /* HAVE_SNPRINTF, etc */ /* yes this really must be a ||. Don't muck with this (tridge) */ #if !defined(HAVE_VSNPRINTF) || !defined(HAVE_C99_VSNPRINTF) #ifdef HAVE_LONG_DOUBLE #define LDOUBLE long double #else #define LDOUBLE double #endif #ifdef HAVE_LONG_LONG #define LLONG long long #else #define LLONG long #endif #ifndef VA_COPY #ifdef HAVE_VA_COPY #define VA_COPY(dest, src) va_copy(dest, src) #else #ifdef HAVE___VA_COPY #define VA_COPY(dest, src) __va_copy(dest, src) #else #define VA_COPY(dest, src) (dest) = (src) #endif #endif /* * dopr(): poor man's version of doprintf */ /* format read states */ #define DP_S_DEFAULT 0 #define DP_S_FLAGS 1 #define DP_S_MIN 2 #define DP_S_DOT 3 #define DP_S_MAX 4 #define DP_S_MOD 5 #define DP_S_CONV 6 #define DP_S_DONE 7 /* format flags - Bits */ #define DP_F_MINUS (1 << 0) #define DP_F_PLUS (1 << 1) #define DP_F_SPACE (1 << 2) #define DP_F_NUM (1 << 3) #define DP_F_ZERO (1 << 4) #define DP_F_UP (1 << 5) #define DP_F_UNSIGNED (1 << 6) /* Conversion Flags */ #define DP_C_CHAR 1 #define DP_C_SHORT 2 #define DP_C_LONG 3 #define DP_C_LDOUBLE 4 #define DP_C_LLONG 5 #define DP_C_SIZET 6 /* Chunk types */ #define CNK_FMT_STR 0 #define CNK_INT 1 #define CNK_OCTAL 2 #define CNK_UINT 3 #define CNK_HEX 4 #define CNK_FLOAT 5 #define CNK_CHAR 6 #define CNK_STRING 7 #define CNK_PTR 8 #define CNK_NUM 9 #define CNK_PRCNT 10 #define char_to_int(p) ((p)- '0') #ifndef MAX #define MAX(p,q) (((p) >= (q)) ? 
(p) : (q)) #endif struct pr_chunk { int type; /* chunk type */ int num; /* parameter number */ int min; int max; int flags; int cflags; int start; int len; LLONG value; LDOUBLE fvalue; char *strvalue; void *pnum; struct pr_chunk *min_star; struct pr_chunk *max_star; struct pr_chunk *next; }; struct pr_chunk_x { struct pr_chunk **chunks; int num; }; static int dopr(char *buffer, size_t maxlen, const char *format, va_list args_in); static void fmtstr(char *buffer, size_t *currlen, size_t maxlen, char *value, int flags, int min, int max); static void fmtint(char *buffer, size_t *currlen, size_t maxlen, LLONG value, int base, int min, int max, int flags); static void fmtfp(char *buffer, size_t *currlen, size_t maxlen, LDOUBLE fvalue, int min, int max, int flags); static void dopr_outch(char *buffer, size_t *currlen, size_t maxlen, char c); static struct pr_chunk *new_chunk(void); static int add_cnk_list_entry(struct pr_chunk_x **list, int max_num, struct pr_chunk *chunk); static int dopr(char *buffer, size_t maxlen, const char *format, va_list args_in) { char ch; int state; int pflag; int pnum; int pfirst; size_t currlen; va_list args; const char *base; struct pr_chunk *chunks = NULL; struct pr_chunk *cnk = NULL; struct pr_chunk_x *clist = NULL; int max_pos; int ret = -1; VA_COPY(args, args_in); state = DP_S_DEFAULT; pfirst = 1; pflag = 0; pnum = 0; max_pos = 0; base = format; ch = *format++; /* retrieve the string structure as chunks */ while (state != DP_S_DONE) { if (ch == '\0') state = DP_S_DONE; switch(state) { case DP_S_DEFAULT: if (cnk) { cnk->next = new_chunk(); cnk = cnk->next; } else { cnk = new_chunk(); } if (!cnk) goto done; if (!chunks) chunks = cnk; if (ch == '%') { state = DP_S_FLAGS; ch = *format++; } else { cnk->type = CNK_FMT_STR; cnk->start = format - base -1; while ((ch != '\0') && (ch != '%')) ch = *format++; cnk->len = format - base - cnk->start -1; } break; case DP_S_FLAGS: switch (ch) { case '-': cnk->flags |= DP_F_MINUS; ch = *format++; break; case '+': cnk->flags |= DP_F_PLUS; ch = *format++; break; case ' ': cnk->flags |= DP_F_SPACE; ch = *format++; break; case '#': cnk->flags |= DP_F_NUM; ch = *format++; break; case '0': cnk->flags |= DP_F_ZERO; ch = *format++; break; case 'I': /* internationalization not supported yet */ ch = *format++; break; default: state = DP_S_MIN; break; } break; case DP_S_MIN: if (isdigit((unsigned char)ch)) { cnk->min = 10 * cnk->min + char_to_int (ch); ch = *format++; } else if (ch == '$') { if (!pfirst && !pflag) { /* parameters must be all positioned or none */ goto done; } if (pfirst) { pfirst = 0; pflag = 1; } if (cnk->min == 0) /* what ?? */ goto done; cnk->num = cnk->min; cnk->min = 0; ch = *format++; } else if (ch == '*') { if (pfirst) pfirst = 0; cnk->min_star = new_chunk(); if (!cnk->min_star) /* out of memory :-( */ goto done; cnk->min_star->type = CNK_INT; if (pflag) { int num; ch = *format++; if (!isdigit((unsigned char)ch)) { /* parameters must be all positioned or none */ goto done; } for (num = 0; isdigit((unsigned char)ch); ch = *format++) { num = 10 * num + char_to_int(ch); } cnk->min_star->num = num; if (ch != '$') /* what ?? 
*/ goto done; } else { cnk->min_star->num = ++pnum; } max_pos = add_cnk_list_entry(&clist, max_pos, cnk->min_star); if (max_pos == 0) /* out of memory :-( */ goto done; ch = *format++; state = DP_S_DOT; } else { if (pfirst) pfirst = 0; state = DP_S_DOT; } break; case DP_S_DOT: if (ch == '.') { state = DP_S_MAX; ch = *format++; } else { state = DP_S_MOD; } break; case DP_S_MAX: if (isdigit((unsigned char)ch)) { if (cnk->max < 0) cnk->max = 0; cnk->max = 10 * cnk->max + char_to_int (ch); ch = *format++; } else if (ch == '$') { if (!pfirst && !pflag) { /* parameters must be all positioned or none */ goto done; } if (cnk->max <= 0) /* what ?? */ goto done; cnk->num = cnk->max; cnk->max = -1; ch = *format++; } else if (ch == '*') { cnk->max_star = new_chunk(); if (!cnk->max_star) /* out of memory :-( */ goto done; cnk->max_star->type = CNK_INT; if (pflag) { int num; ch = *format++; if (!isdigit((unsigned char)ch)) { /* parameters must be all positioned or none */ goto done; } for (num = 0; isdigit((unsigned char)ch); ch = *format++) { num = 10 * num + char_to_int(ch); } cnk->max_star->num = num; if (ch != '$') /* what ?? */ goto done; } else { cnk->max_star->num = ++pnum; } max_pos = add_cnk_list_entry(&clist, max_pos, cnk->max_star); if (max_pos == 0) /* out of memory :-( */ goto done; ch = *format++; state = DP_S_MOD; } else { state = DP_S_MOD; } break; case DP_S_MOD: switch (ch) { case 'h': cnk->cflags = DP_C_SHORT; ch = *format++; if (ch == 'h') { cnk->cflags = DP_C_CHAR; ch = *format++; } break; case 'l': cnk->cflags = DP_C_LONG; ch = *format++; if (ch == 'l') { /* It's a long long */ cnk->cflags = DP_C_LLONG; ch = *format++; } break; case 'j': cnk->cflags = DP_C_LLONG; ch = *format++; break; case 'L': cnk->cflags = DP_C_LDOUBLE; ch = *format++; break; case 'z': cnk->cflags = DP_C_SIZET; ch = *format++; break; default: break; } state = DP_S_CONV; break; case DP_S_CONV: if (cnk->num == 0) cnk->num = ++pnum; max_pos = add_cnk_list_entry(&clist, max_pos, cnk); if (max_pos == 0) /* out of memory :-( */ goto done; switch (ch) { case 'd': case 'i': cnk->type = CNK_INT; break; case 'o': cnk->type = CNK_OCTAL; cnk->flags |= DP_F_UNSIGNED; break; case 'u': cnk->type = CNK_UINT; cnk->flags |= DP_F_UNSIGNED; break; case 'X': cnk->flags |= DP_F_UP; case 'x': cnk->type = CNK_HEX; cnk->flags |= DP_F_UNSIGNED; break; case 'A': /* hex float not supported yet */ case 'E': case 'G': case 'F': cnk->flags |= DP_F_UP; case 'a': /* hex float not supported yet */ case 'e': case 'f': case 'g': cnk->type = CNK_FLOAT; break; case 'c': cnk->type = CNK_CHAR; break; case 's': cnk->type = CNK_STRING; break; case 'p': cnk->type = CNK_PTR; cnk->flags |= DP_F_UNSIGNED; break; case 'n': cnk->type = CNK_NUM; break; case '%': cnk->type = CNK_PRCNT; break; default: /* Unknown, bail out*/ goto done; } ch = *format++; state = DP_S_DEFAULT; break; case DP_S_DONE: break; default: /* hmm? */ break; /* some picky compilers need this */ } } /* retrieve the format arguments */ for (pnum = 0; pnum < max_pos; pnum++) { int i; if (clist[pnum].num == 0) { /* ignoring a parameter should not be permitted * all parameters must be matched at least once * BUT seem some system ignore this rule ... * at least my glibc based system does --SSS */ #ifdef DEBUG_SNPRINTF printf("parameter at position %d not used\n", pnum+1); #endif /* eat the parameter */ va_arg (args, int); continue; } for (i = 1; i < clist[pnum].num; i++) { if (clist[pnum].chunks[0]->type != clist[pnum].chunks[i]->type) { /* nooo noo no! 
* all the references to a parameter * must be of the same type */ goto done; } } cnk = clist[pnum].chunks[0]; switch (cnk->type) { case CNK_INT: if (cnk->cflags == DP_C_SHORT) cnk->value = va_arg (args, int); else if (cnk->cflags == DP_C_LONG) cnk->value = va_arg (args, long int); else if (cnk->cflags == DP_C_LLONG) cnk->value = va_arg (args, LLONG); else if (cnk->cflags == DP_C_SIZET) cnk->value = va_arg (args, ssize_t); else cnk->value = va_arg (args, int); for (i = 1; i < clist[pnum].num; i++) { clist[pnum].chunks[i]->value = cnk->value; } break; case CNK_OCTAL: case CNK_UINT: case CNK_HEX: if (cnk->cflags == DP_C_SHORT) cnk->value = va_arg (args, unsigned int); else if (cnk->cflags == DP_C_LONG) cnk->value = (unsigned long int)va_arg (args, unsigned long int); else if (cnk->cflags == DP_C_LLONG) cnk->value = (LLONG)va_arg (args, unsigned LLONG); else if (cnk->cflags == DP_C_SIZET) cnk->value = (size_t)va_arg (args, size_t); else cnk->value = (unsigned int)va_arg (args, unsigned int); for (i = 1; i < clist[pnum].num; i++) { clist[pnum].chunks[i]->value = cnk->value; } break; case CNK_FLOAT: if (cnk->cflags == DP_C_LDOUBLE) cnk->fvalue = va_arg (args, LDOUBLE); else cnk->fvalue = va_arg (args, double); for (i = 1; i < clist[pnum].num; i++) { clist[pnum].chunks[i]->fvalue = cnk->fvalue; } break; case CNK_CHAR: cnk->value = va_arg (args, int); for (i = 1; i < clist[pnum].num; i++) { clist[pnum].chunks[i]->value = cnk->value; } break; case CNK_STRING: cnk->strvalue = va_arg (args, char *); if (!cnk->strvalue) cnk->strvalue = "(NULL)"; for (i = 1; i < clist[pnum].num; i++) { clist[pnum].chunks[i]->strvalue = cnk->strvalue; } break; case CNK_PTR: cnk->strvalue = va_arg (args, void *); for (i = 1; i < clist[pnum].num; i++) { clist[pnum].chunks[i]->strvalue = cnk->strvalue; } break; case CNK_NUM: if (cnk->cflags == DP_C_CHAR) cnk->pnum = va_arg (args, char *); else if (cnk->cflags == DP_C_SHORT) cnk->pnum = va_arg (args, short int *); else if (cnk->cflags == DP_C_LONG) cnk->pnum = va_arg (args, long int *); else if (cnk->cflags == DP_C_LLONG) cnk->pnum = va_arg (args, LLONG *); else if (cnk->cflags == DP_C_SIZET) cnk->pnum = va_arg (args, ssize_t *); else cnk->pnum = va_arg (args, int *); for (i = 1; i < clist[pnum].num; i++) { clist[pnum].chunks[i]->pnum = cnk->pnum; } break; case CNK_PRCNT: break; default: /* what ?? 
*/ goto done; } } /* print out the actual string from chunks */ currlen = 0; cnk = chunks; while (cnk) { int len, min, max; if (cnk->min_star) min = cnk->min_star->value; else min = cnk->min; if (cnk->max_star) max = cnk->max_star->value; else max = cnk->max; switch (cnk->type) { case CNK_FMT_STR: if (maxlen != 0 && maxlen > currlen) { if (maxlen > (currlen + cnk->len)) len = cnk->len; else len = maxlen - currlen; memcpy(&(buffer[currlen]), &(base[cnk->start]), len); } currlen += cnk->len; break; case CNK_INT: case CNK_UINT: fmtint (buffer, &currlen, maxlen, cnk->value, 10, min, max, cnk->flags); break; case CNK_OCTAL: fmtint (buffer, &currlen, maxlen, cnk->value, 8, min, max, cnk->flags); break; case CNK_HEX: fmtint (buffer, &currlen, maxlen, cnk->value, 16, min, max, cnk->flags); break; case CNK_FLOAT: fmtfp (buffer, &currlen, maxlen, cnk->fvalue, min, max, cnk->flags); break; case CNK_CHAR: dopr_outch (buffer, &currlen, maxlen, cnk->value); break; case CNK_STRING: if (max == -1) { max = strlen(cnk->strvalue); } fmtstr (buffer, &currlen, maxlen, cnk->strvalue, cnk->flags, min, max); break; case CNK_PTR: fmtint (buffer, &currlen, maxlen, (long)(cnk->strvalue), 16, min, max, cnk->flags); break; case CNK_NUM: if (cnk->cflags == DP_C_CHAR) *((char *)(cnk->pnum)) = (char)currlen; else if (cnk->cflags == DP_C_SHORT) *((short int *)(cnk->pnum)) = (short int)currlen; else if (cnk->cflags == DP_C_LONG) *((long int *)(cnk->pnum)) = (long int)currlen; else if (cnk->cflags == DP_C_LLONG) *((LLONG *)(cnk->pnum)) = (LLONG)currlen; else if (cnk->cflags == DP_C_SIZET) *((ssize_t *)(cnk->pnum)) = (ssize_t)currlen; else *((int *)(cnk->pnum)) = (int)currlen; break; case CNK_PRCNT: dopr_outch (buffer, &currlen, maxlen, '%'); break; default: /* what ?? */ goto done; } cnk = cnk->next; } if (maxlen != 0) { if (currlen < maxlen - 1) buffer[currlen] = '\0'; else if (maxlen > 0) buffer[maxlen - 1] = '\0'; } ret = currlen; done: va_end(args); while (chunks) { cnk = chunks->next; free(chunks); chunks = cnk; } if (clist) { for (pnum = 0; pnum < max_pos; pnum++) { if (clist[pnum].chunks) free(clist[pnum].chunks); } free(clist); } return ret; } static void fmtstr(char *buffer, size_t *currlen, size_t maxlen, char *value, int flags, int min, int max) { int padlen, strln; /* amount to pad */ int cnt = 0; #ifdef DEBUG_SNPRINTF printf("fmtstr min=%d max=%d s=[%s]\n", min, max, value); #endif if (value == 0) { value = ""; } for (strln = 0; strln < max && value[strln]; ++strln); /* strlen */ padlen = min - strln; if (padlen < 0) padlen = 0; if (flags & DP_F_MINUS) padlen = -padlen; /* Left Justify */ while (padlen > 0) { dopr_outch (buffer, currlen, maxlen, ' '); --padlen; } while (*value && (cnt < max)) { dopr_outch (buffer, currlen, maxlen, *value++); ++cnt; } while (padlen < 0) { dopr_outch (buffer, currlen, maxlen, ' '); ++padlen; } } /* Have to handle DP_F_NUM (ie 0x and 0 alternates) */ static void fmtint(char *buffer, size_t *currlen, size_t maxlen, LLONG value, int base, int min, int max, int flags) { int signvalue = 0; unsigned LLONG uvalue; char convert[22+1]; /* 64-bit value in octal: 22 digits + \0 */ int place = 0; int spadlen = 0; /* amount to space pad */ int zpadlen = 0; /* amount to zero pad */ int caps = 0; if (max < 0) max = 0; uvalue = value; if(!(flags & DP_F_UNSIGNED)) { if( value < 0 ) { signvalue = '-'; uvalue = -value; } else { if (flags & DP_F_PLUS) /* Do a sign (+/i) */ signvalue = '+'; else if (flags & DP_F_SPACE) signvalue = ' '; } } if (flags & DP_F_UP) caps = 1; /* Should characters be upper 
case? */ do { convert[place++] = (caps? "0123456789ABCDEF":"0123456789abcdef") [uvalue % (unsigned)base ]; uvalue = (uvalue / (unsigned)base ); } while(uvalue && (place < sizeof(convert))); if (place == sizeof(convert)) place--; convert[place] = 0; zpadlen = max - place; spadlen = min - MAX (max, place) - (signvalue ? 1 : 0); if (zpadlen < 0) zpadlen = 0; if (spadlen < 0) spadlen = 0; if (flags & DP_F_ZERO) { zpadlen = MAX(zpadlen, spadlen); spadlen = 0; } if (flags & DP_F_MINUS) spadlen = -spadlen; /* Left Justifty */ #ifdef DEBUG_SNPRINTF printf("zpad: %d, spad: %d, min: %d, max: %d, place: %d\n", zpadlen, spadlen, min, max, place); #endif /* Spaces */ while (spadlen > 0) { dopr_outch (buffer, currlen, maxlen, ' '); --spadlen; } /* Sign */ if (signvalue) dopr_outch (buffer, currlen, maxlen, signvalue); /* Zeros */ if (zpadlen > 0) { while (zpadlen > 0) { dopr_outch (buffer, currlen, maxlen, '0'); --zpadlen; } } /* Digits */ while (place > 0) dopr_outch (buffer, currlen, maxlen, convert[--place]); /* Left Justified spaces */ while (spadlen < 0) { dopr_outch (buffer, currlen, maxlen, ' '); ++spadlen; } } static LDOUBLE abs_val(LDOUBLE value) { LDOUBLE result = value; if (value < 0) result = -value; return result; } static LDOUBLE POW10(int exp) { LDOUBLE result = 1; while (exp) { result *= 10; exp--; } return result; } static LLONG ROUND(LDOUBLE value) { LLONG intpart; intpart = (LLONG)value; value = value - intpart; if (value >= 0.5) intpart++; return intpart; } /* a replacement for modf that doesn't need the math library. Should be portable, but slow */ static double my_modf(double x0, double *iptr) { int i; LLONG l=0; double x = x0; double f = 1.0; for (i=0;i<100;i++) { l = (long)x; if (l <= (x+1) && l >= (x-1)) break; x *= 0.1; f *= 10.0; } if (i == 100) { /* yikes! the number is beyond what we can handle. What do we do? */ (*iptr) = 0; return 0; } if (i != 0) { double i2; double ret; ret = my_modf(x0-l*f, &i2); (*iptr) = l*f + i2; return ret; } (*iptr) = l; return x - (*iptr); } static void fmtfp (char *buffer, size_t *currlen, size_t maxlen, LDOUBLE fvalue, int min, int max, int flags) { int signvalue = 0; double ufvalue; char iconvert[311]; char fconvert[311]; int iplace = 0; int fplace = 0; int padlen = 0; /* amount to pad */ int zpadlen = 0; int caps = 0; int idx; double intpart; double fracpart; double temp; /* * AIX manpage says the default is 0, but Solaris says the default * is 6, and sprintf on AIX defaults to 6 */ if (max < 0) max = 6; ufvalue = abs_val (fvalue); if (fvalue < 0) { signvalue = '-'; } else { if (flags & DP_F_PLUS) { /* Do a sign (+/i) */ signvalue = '+'; } else { if (flags & DP_F_SPACE) signvalue = ' '; } } #if 0 if (flags & DP_F_UP) caps = 1; /* Should characters be upper case? */ #endif #if 0 if (max == 0) ufvalue += 0.5; /* if max = 0 we must round */ #endif /* * Sorry, we only support 9 digits past the decimal because of our * conversion method */ if (max > 9) max = 9; /* We "cheat" by converting the fractional part to integer by * multiplying by a factor of 10 */ temp = ufvalue; my_modf(temp, &intpart); fracpart = ROUND((POW10(max)) * (ufvalue - intpart)); if (fracpart >= POW10(max)) { intpart++; fracpart -= POW10(max); } /* Convert integer part */ do { temp = intpart*0.1; my_modf(temp, &intpart); idx = (int) ((temp -intpart +0.05)* 10.0); /* idx = (int) (((double)(temp*0.1) -intpart +0.05) *10.0); */ /* printf ("%llf, %f, %x\n", temp, intpart, idx); */ iconvert[iplace++] = (caps? 
"0123456789ABCDEF":"0123456789abcdef")[idx]; } while (intpart && (iplace < 311)); if (iplace == 311) iplace--; iconvert[iplace] = 0; /* Convert fractional part */ if (fracpart) { do { temp = fracpart*0.1; my_modf(temp, &fracpart); idx = (int) ((temp -fracpart +0.05)* 10.0); /* idx = (int) ((((temp/10) -fracpart) +0.05) *10); */ /* printf ("%lf, %lf, %ld\n", temp, fracpart, idx ); */ fconvert[fplace++] = (caps? "0123456789ABCDEF":"0123456789abcdef")[idx]; } while(fracpart && (fplace < 311)); if (fplace == 311) fplace--; } fconvert[fplace] = 0; /* -1 for decimal point, another -1 if we are printing a sign */ padlen = min - iplace - max - 1 - ((signvalue) ? 1 : 0); zpadlen = max - fplace; if (zpadlen < 0) zpadlen = 0; if (padlen < 0) padlen = 0; if (flags & DP_F_MINUS) padlen = -padlen; /* Left Justifty */ if ((flags & DP_F_ZERO) && (padlen > 0)) { if (signvalue) { dopr_outch (buffer, currlen, maxlen, signvalue); --padlen; signvalue = 0; } while (padlen > 0) { dopr_outch (buffer, currlen, maxlen, '0'); --padlen; } } while (padlen > 0) { dopr_outch (buffer, currlen, maxlen, ' '); --padlen; } if (signvalue) dopr_outch (buffer, currlen, maxlen, signvalue); while (iplace > 0) dopr_outch (buffer, currlen, maxlen, iconvert[--iplace]); #ifdef DEBUG_SNPRINTF printf("fmtfp: fplace=%d zpadlen=%d\n", fplace, zpadlen); #endif /* * Decimal point. This should probably use locale to find the correct * char to print out. */ if (max > 0) { dopr_outch (buffer, currlen, maxlen, '.'); while (zpadlen > 0) { dopr_outch (buffer, currlen, maxlen, '0'); --zpadlen; } while (fplace > 0) dopr_outch (buffer, currlen, maxlen, fconvert[--fplace]); } while (padlen < 0) { dopr_outch (buffer, currlen, maxlen, ' '); ++padlen; } } static void dopr_outch(char *buffer, size_t *currlen, size_t maxlen, char c) { if (*currlen < maxlen) { buffer[(*currlen)] = c; } (*currlen)++; } static struct pr_chunk *new_chunk(void) { struct pr_chunk *new_c = (struct pr_chunk *)malloc(sizeof(struct pr_chunk)); if (!new_c) return NULL; new_c->type = 0; new_c->num = 0; new_c->min = 0; new_c->min_star = NULL; new_c->max = -1; new_c->max_star = NULL; new_c->flags = 0; new_c->cflags = 0; new_c->start = 0; new_c->len = 0; new_c->value = 0; new_c->fvalue = 0; new_c->strvalue = NULL; new_c->pnum = NULL; new_c->next = NULL; return new_c; } static int add_cnk_list_entry(struct pr_chunk_x **list, int max_num, struct pr_chunk *chunk) { struct pr_chunk_x *l; struct pr_chunk **c; int max; int cnum; int i, pos; if (chunk->num > max_num) { max = chunk->num; if (*list == NULL) { l = (struct pr_chunk_x *)malloc(sizeof(struct pr_chunk_x) * max); pos = 0; } else { l = (struct pr_chunk_x *)realloc(*list, sizeof(struct pr_chunk_x) * max); pos = max_num; } if (l == NULL) { for (i = 0; i < max; i++) { if ((*list)[i].chunks) free((*list)[i].chunks); } return 0; } for (i = pos; i < max; i++) { l[i].chunks = NULL; l[i].num = 0; } } else { l = *list; max = max_num; } i = chunk->num - 1; cnum = l[i].num + 1; if (l[i].chunks == NULL) { c = (struct pr_chunk **)malloc(sizeof(struct pr_chunk *) * cnum); } else { c = (struct pr_chunk **)realloc(l[i].chunks, sizeof(struct pr_chunk *) * cnum); } if (c == NULL) { for (i = 0; i < max; i++) { if (l[i].chunks) free(l[i].chunks); } return 0; } c[l[i].num] = chunk; l[i].chunks = c; l[i].num = cnum; *list = l; return max; } int rep_vsnprintf (char *str, size_t count, const char *fmt, va_list args) { return dopr(str, count, fmt, args); } #endif /* yes this really must be a ||. 
Don't muck with this (tridge) * * The logic for these two is that we need our own definition if the * OS *either* has no definition of *sprintf, or if it does have one * that doesn't work properly according to the autoconf test. */ #if !defined(HAVE_SNPRINTF) || !defined(HAVE_C99_VSNPRINTF) int rep_snprintf(char *str,size_t count,const char *fmt,...) { size_t ret; va_list ap; va_start(ap, fmt); ret = vsnprintf(str, count, fmt, ap); va_end(ap); return ret; } #endif #ifndef HAVE_C99_VSNPRINTF int rep_printf(const char *fmt, ...) { va_list ap; int ret; char *s; s = NULL; va_start(ap, fmt); ret = vasprintf(&s, fmt, ap); va_end(ap); if (s) { fwrite(s, 1, strlen(s), stdout); } free(s); return ret; } #endif #ifndef HAVE_C99_VSNPRINTF int rep_fprintf(FILE *stream, const char *fmt, ...) { va_list ap; int ret; char *s; s = NULL; va_start(ap, fmt); ret = vasprintf(&s, fmt, ap); va_end(ap); if (s) { fwrite(s, 1, strlen(s), stream); } free(s); return ret; } #endif #endif #if !defined(HAVE_VASPRINTF) || !defined(HAVE_C99_VSNPRINTF) int rep_vasprintf(char **ptr, const char *format, va_list ap) { int ret; va_list ap2; VA_COPY(ap2, ap); ret = vsnprintf(NULL, 0, format, ap2); va_end(ap2); if (ret < 0) return ret; (*ptr) = (char *)malloc(ret+1); if (!*ptr) return -1; VA_COPY(ap2, ap); ret = vsnprintf(*ptr, ret+1, format, ap2); va_end(ap2); return ret; } #endif #if !defined(HAVE_ASPRINTF) || !defined(HAVE_C99_VSNPRINTF) int rep_asprintf(char **ptr, const char *format, ...) { va_list ap; int ret; *ptr = NULL; va_start(ap, format); ret = vasprintf(ptr, format, ap); va_end(ap); return ret; } #endif #ifdef TEST_SNPRINTF int sprintf(char *str,const char *fmt,...); int printf(const char *fmt,...); int main (void) { char buf1[1024]; char buf2[1024]; char *buf3; char *fp_fmt[] = { "%1.1f", "%-1.5f", "%1.5f", "%123.9f", "%10.5f", "% 10.5f", "%+22.9f", "%+4.9f", "%01.3f", "%4f", "%3.1f", "%3.2f", "%.0f", "%f", "%-8.8f", "%-9.9f", NULL }; double fp_nums[] = { 6442452944.1234, -1.5, 134.21, 91340.2, 341.1234, 203.9, 0.96, 0.996, 0.9996, 1.996, 4.136, 5.030201, 0.00205, /* END LIST */ 0}; char *int_fmt[] = { "%-1.5d", "%1.5d", "%123.9d", "%5.5d", "%10.5d", "% 10.5d", "%+22.33d", "%01.3d", "%4d", "%d", NULL }; long int_nums[] = { -1, 134, 91340, 341, 0203, 1234567890, 0}; char *str_fmt[] = { "%10.5s", "%-10.5s", "%5.10s", "%-5.10s", "%10.1s", "%0.10s", "%10.0s", "%1.10s", "%s", "%.1s", "%.10s", "%10s", NULL }; char *str_vals[] = {"hello", "a", "", "a longer string", NULL}; #ifdef HAVE_LONG_LONG char *ll_fmt[] = { "%llu", NULL }; LLONG ll_nums[] = { 134, 91340, 341, 0203, 1234567890, 128006186140000000LL, 0}; #endif int x, y; int fail = 0; int num = 0; int l1, l2; char *ss_fmt[] = { "%zd", "%zu", NULL }; size_t ss_nums[] = {134, 91340, 123456789, 0203, 1234567890, 0}; printf ("Testing snprintf format codes against system sprintf...\n"); for (x = 0; fp_fmt[x] ; x++) { for (y = 0; fp_nums[y] != 0 ; y++) { buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), fp_fmt[x], fp_nums[y]); l2 = sprintf (buf2, fp_fmt[x], fp_nums[y]); buf1[1023] = buf2[1023] = '\0'; if (strcmp (buf1, buf2) || (l1 != l2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", fp_fmt[x], l1, buf1, l2, buf2); fail++; } num++; } } for (x = 0; int_fmt[x] ; x++) { for (y = 0; int_nums[y] != 0 ; y++) { buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), int_fmt[x], int_nums[y]); l2 = sprintf (buf2, int_fmt[x], int_nums[y]); buf1[1023] = buf2[1023] = '\0'; if (strcmp (buf1, buf2) || (l1 != l2)) { 
printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", int_fmt[x], l1, buf1, l2, buf2); fail++; } num++; } } for (x = 0; str_fmt[x] ; x++) { for (y = 0; str_vals[y] != 0 ; y++) { buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), str_fmt[x], str_vals[y]); l2 = sprintf (buf2, str_fmt[x], str_vals[y]); buf1[1023] = buf2[1023] = '\0'; if (strcmp (buf1, buf2) || (l1 != l2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", str_fmt[x], l1, buf1, l2, buf2); fail++; } num++; } } #ifdef HAVE_LONG_LONG for (x = 0; ll_fmt[x] ; x++) { for (y = 0; ll_nums[y] != 0 ; y++) { buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), ll_fmt[x], ll_nums[y]); l2 = sprintf (buf2, ll_fmt[x], ll_nums[y]); buf1[1023] = buf2[1023] = '\0'; if (strcmp (buf1, buf2) || (l1 != l2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", ll_fmt[x], l1, buf1, l2, buf2); fail++; } num++; } } #endif #define BUFSZ 2048 buf1[0] = buf2[0] = '\0'; if ((buf3 = malloc(BUFSZ)) == NULL) { fail++; } else { num++; memset(buf3, 'a', BUFSZ); snprintf(buf1, sizeof(buf1), "%.*s", 1, buf3); buf1[1023] = '\0'; if (strcmp(buf1, "a") != 0) { printf("length limit buf1 '%s' expected 'a'\n", buf1); fail++; } } buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), "%4$*1$d %2$s %3$*1$.*1$f", 3, "pos test", 12.3456, 9); l2 = sprintf(buf2, "%4$*1$d %2$s %3$*1$.*1$f", 3, "pos test", 12.3456, 9); buf1[1023] = buf2[1023] = '\0'; if (strcmp(buf1, buf2) || (l1 != l2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", "%4$*1$d %2$s %3$*1$.*1$f", l1, buf1, l2, buf2); fail++; } buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), "%4$*4$d %2$s %3$*4$.*4$f", 3, "pos test", 12.3456, 9); l2 = sprintf(buf2, "%4$*4$d %2$s %3$*4$.*4$f", 3, "pos test", 12.3456, 9); buf1[1023] = buf2[1023] = '\0'; if (strcmp(buf1, buf2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", "%4$*1$d %2$s %3$*1$.*1$f", l1, buf1, l2, buf2); fail++; } for (x = 0; ss_fmt[x] ; x++) { for (y = 0; ss_nums[y] != 0 ; y++) { buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), ss_fmt[x], ss_nums[y]); l2 = sprintf (buf2, ss_fmt[x], ss_nums[y]); buf1[1023] = buf2[1023] = '\0'; if (strcmp (buf1, buf2) || (l1 != l2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", ss_fmt[x], l1, buf1, l2, buf2); fail++; } num++; } } #if 0 buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), "%lld", (LLONG)1234567890); l2 = sprintf(buf2, "%lld", (LLONG)1234567890); buf1[1023] = buf2[1023] = '\0'; if (strcmp(buf1, buf2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", "%lld", l1, buf1, l2, buf2); fail++; } buf1[0] = buf2[0] = '\0'; l1 = snprintf(buf1, sizeof(buf1), "%Lf", (LDOUBLE)890.1234567890123); l2 = sprintf(buf2, "%Lf", (LDOUBLE)890.1234567890123); buf1[1023] = buf2[1023] = '\0'; if (strcmp(buf1, buf2)) { printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", "%Lf", l1, buf1, l2, buf2); fail++; } #endif printf ("%d tests failed out of %d.\n", fail, num); printf("seeing how many digits we support\n"); { double v0 = 0.12345678901234567890123456789012345678901; for (x=0; x<100; x++) { double p = pow(10, x); double r = v0*p; snprintf(buf1, sizeof(buf1), "%1.1f", r); sprintf(buf2, "%1.1f", r); if (strcmp(buf1, buf2)) { printf("we seem to support 
%d digits\n", x-1); break; } } } return 0; } #endif /* TEST_SNPRINTF */ tdb-1.4.2/lib/replace/socket.c0000660000000000000000000000230712406075657016056 0ustar rootroot00000000000000/* * Unix SMB/CIFS implementation. * * Dummy replacements for socket functions. * * Copyright (C) Michael Adam 2008 * * ** NOTE! The following LGPL license applies to the replace * ** library. This does NOT imply that all of Samba is released * ** under the LGPL * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . */ #include "replace.h" #include "system/network.h" int rep_connect(int sockfd, const struct sockaddr *serv_addr, socklen_t addrlen) { errno = ENOSYS; return -1; } struct hostent *rep_gethostbyname(const char *name) { errno = ENOSYS; return NULL; } tdb-1.4.2/lib/replace/socketpair.c0000660000000000000000000000250112406075657016726 0ustar rootroot00000000000000/* * Unix SMB/CIFS implementation. * replacement routines for broken systems * Copyright (C) Jelmer Vernooij 2006 * Copyright (C) Michael Adam 2008 * * ** NOTE! The following LGPL license applies to the replace * ** library. This does NOT imply that all of Samba is released * ** under the LGPL * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 3 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see . */ #include "replace.h" #include "system/network.h" int rep_socketpair(int d, int type, int protocol, int sv[2]) { if (d != AF_UNIX) { errno = EAFNOSUPPORT; return -1; } if (protocol != 0) { errno = EPROTONOSUPPORT; return -1; } if (type != SOCK_STREAM) { errno = EOPNOTSUPP; return -1; } return pipe(sv); } tdb-1.4.2/lib/replace/strptime.c0000660000000000000000000005674113444661620016442 0ustar rootroot00000000000000/* Convert a string representation of time to a time value. Copyright (C) 1996, 1997, 1998, 1999, 2000 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper , 1996. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details. 
You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; see the file COPYING.LIB. If not, see . */ /* XXX This version of the implementation is not really complete. Some of the fields cannot add information alone. But if seeing some of them in the same format (such as year, week and weekday) this is enough information for determining the date. */ #include "replace.h" #include "system/locale.h" #include "system/time.h" #ifndef __P # if defined (__GNUC__) || (defined (__STDC__) && __STDC__) # define __P(args) args # else # define __P(args) () # endif /* GCC. */ #endif /* Not __P. */ #if ! HAVE_LOCALTIME_R && ! defined localtime_r # ifdef _LIBC # define localtime_r __localtime_r # else /* Approximate localtime_r as best we can in its absence. */ # define localtime_r my_localtime_r static struct tm *localtime_r __P ((const time_t *, struct tm *)); static struct tm * localtime_r (t, tp) const time_t *t; struct tm *tp; { struct tm *l = localtime (t); if (! l) return 0; *tp = *l; return tp; } # endif /* ! _LIBC */ #endif /* ! HAVE_LOCALTIME_R && ! defined (localtime_r) */ #define match_char(ch1, ch2) if (ch1 != ch2) return NULL #if defined __GNUC__ && __GNUC__ >= 2 # define match_string(cs1, s2) \ ({ size_t len = strlen (cs1); \ int result = strncasecmp ((cs1), (s2), len) == 0; \ if (result) (s2) += len; \ result; }) #else /* Oh come on. Get a reasonable compiler. */ # define match_string(cs1, s2) \ (strncasecmp ((cs1), (s2), strlen (cs1)) ? 0 : ((s2) += strlen (cs1), 1)) #endif /* We intentionally do not use isdigit() for testing because this will lead to problems with the wide character version. */ #define get_number(from, to, n) \ do { \ int __n = n; \ val = 0; \ while (*rp == ' ') \ ++rp; \ if (*rp < '0' || *rp > '9') \ return NULL; \ do { \ val *= 10; \ val += *rp++ - '0'; \ } while (--__n > 0 && val * 10 <= to && *rp >= '0' && *rp <= '9'); \ if (val < from || val > to) \ return NULL; \ } while (0) #ifdef _NL_CURRENT # define get_alt_number(from, to, n) \ ({ \ __label__ do_normal; \ if (*decided != raw) \ { \ const char *alts = _NL_CURRENT (LC_TIME, ALT_DIGITS); \ int __n = n; \ int any = 0; \ while (*rp == ' ') \ ++rp; \ val = 0; \ do { \ val *= 10; \ while (*alts != '\0') \ { \ size_t len = strlen (alts); \ if (strncasecmp (alts, rp, len) == 0) \ break; \ alts += len + 1; \ ++val; \ } \ if (*alts == '\0') \ { \ if (*decided == not && ! any) \ goto do_normal; \ /* If we haven't read anything it's an error. */ \ if (! any) \ return NULL; \ /* Correct the premature multiplication. */ \ val /= 10; \ break; \ } \ else \ *decided = loc; \ } while (--__n > 0 && val * 10 <= to); \ if (val < from || val > to) \ return NULL; \ } \ else \ { \ do_normal: \ get_number (from, to, n); \ } \ 0; \ }) #else # define get_alt_number(from, to, n) \ /* We don't have the alternate representation. */ \ get_number(from, to, n) #endif #define recursive(new_fmt) \ (*(new_fmt) != '\0' \ && (rp = strptime_internal (rp, (new_fmt), tm, decided, era_cnt)) != NULL) #ifdef _LIBC /* This is defined in locale/C-time.c in the GNU libc. 
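
   (Editorial aside, not part of the original comment: get_number(),
   defined above, is the parser's workhorse. "%d", for instance, runs
   get_number(1, 31, 2): skip leading spaces, consume at most two
   digits, and fail the whole parse -- return NULL -- when the value
   falls outside 1..31. That is the entire bounds check for tm_mday.)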
*/ extern const struct locale_data _nl_C_LC_TIME; extern const unsigned short int __mon_yday[2][13]; # define weekday_name (&_nl_C_LC_TIME.values[_NL_ITEM_INDEX (DAY_1)].string) # define ab_weekday_name \ (&_nl_C_LC_TIME.values[_NL_ITEM_INDEX (ABDAY_1)].string) # define month_name (&_nl_C_LC_TIME.values[_NL_ITEM_INDEX (MON_1)].string) # define ab_month_name (&_nl_C_LC_TIME.values[_NL_ITEM_INDEX (ABMON_1)].string) # define HERE_D_T_FMT (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (D_T_FMT)].string) # define HERE_D_FMT (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (D_FMT)].string) # define HERE_AM_STR (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (AM_STR)].string) # define HERE_PM_STR (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (PM_STR)].string) # define HERE_T_FMT_AMPM \ (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (T_FMT_AMPM)].string) # define HERE_T_FMT (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (T_FMT)].string) # define strncasecmp(s1, s2, n) __strncasecmp (s1, s2, n) #else static char const weekday_name[][10] = { "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday" }; static char const ab_weekday_name[][4] = { "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" }; static char const month_name[][10] = { "January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December" }; static char const ab_month_name[][4] = { "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" }; # define HERE_D_T_FMT "%a %b %e %H:%M:%S %Y" # define HERE_D_FMT "%m/%d/%y" # define HERE_AM_STR "AM" # define HERE_PM_STR "PM" # define HERE_T_FMT_AMPM "%I:%M:%S %p" # define HERE_T_FMT "%H:%M:%S" static const unsigned short int __mon_yday[2][13] = { /* Normal years. */ { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 }, /* Leap years. */ { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 } }; #endif /* Status of lookup: do we use the locale data or the raw data? */ enum locale_status { not, loc, raw }; #ifndef __isleap /* Nonzero if YEAR is a leap year (every 4 years, except every 100th isn't, and every 400th is). */ # define __isleap(year) \ ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0)) #endif /* Compute the day of the week. */ static void day_of_the_week (struct tm *tm) { /* We know that January 1st 1970 was a Thursday (= 4). Compute the the difference between this data in the one on TM and so determine the weekday. */ int corr_year = 1900 + tm->tm_year - (tm->tm_mon < 2); int wday = (-473 + (365 * (tm->tm_year - 70)) + (corr_year / 4) - ((corr_year / 4) / 25) + ((corr_year / 4) % 25 < 0) + (((corr_year / 4) / 25) / 4) + __mon_yday[0][tm->tm_mon] + tm->tm_mday - 1); tm->tm_wday = ((wday % 7) + 7) % 7; } /* Compute the day of the year. 
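
   (Editorial check, not in the original: for 1970-01-01, i.e.
   tm_year = 70, tm_mon = 0, tm_mday = 1, day_of_the_week() above
   computes corr_year = 1969 (the tm_mon < 2 adjustment) and
   wday = -473 + 0 + 492 - 19 + 0 + 4 + 0 + 0 = 4,
   i.e. Thursday with Sunday as 0 -- exactly what its comment promises
   for January 1st 1970.)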
*/ static void day_of_the_year (struct tm *tm) { tm->tm_yday = (__mon_yday[__isleap (1900 + tm->tm_year)][tm->tm_mon] + (tm->tm_mday - 1)); } static char * #ifdef _LIBC internal_function #endif strptime_internal __P ((const char *rp, const char *fmt, struct tm *tm, enum locale_status *decided, int era_cnt)); static char * #ifdef _LIBC internal_function #endif strptime_internal (rp, fmt, tm, decided, era_cnt) const char *rp; const char *fmt; struct tm *tm; enum locale_status *decided; int era_cnt; { int cnt; size_t val; int have_I, is_pm; int century, want_century; int want_era; int have_wday, want_xday; int have_yday; int have_mon, have_mday; #ifdef _NL_CURRENT const char *rp_backup; size_t num_eras; struct era_entry *era; era = NULL; #endif have_I = is_pm = 0; century = -1; want_century = 0; want_era = 0; have_wday = want_xday = have_yday = have_mon = have_mday = 0; while (*fmt != '\0') { /* A white space in the format string matches 0 more or white space in the input string. */ if (isspace (*fmt)) { while (isspace (*rp)) ++rp; ++fmt; continue; } /* Any character but `%' must be matched by the same character in the iput string. */ if (*fmt != '%') { match_char (*fmt++, *rp++); continue; } ++fmt; #ifndef _NL_CURRENT /* We need this for handling the `E' modifier. */ start_over: #endif #ifdef _NL_CURRENT /* Make back up of current processing pointer. */ rp_backup = rp; #endif switch (*fmt++) { case '%': /* Match the `%' character itself. */ match_char ('%', *rp++); break; case 'a': case 'A': /* Match day of week. */ for (cnt = 0; cnt < 7; ++cnt) { #ifdef _NL_CURRENT if (*decided !=raw) { if (match_string (_NL_CURRENT (LC_TIME, DAY_1 + cnt), rp)) { if (*decided == not && strcmp (_NL_CURRENT (LC_TIME, DAY_1 + cnt), weekday_name[cnt])) *decided = loc; break; } if (match_string (_NL_CURRENT (LC_TIME, ABDAY_1 + cnt), rp)) { if (*decided == not && strcmp (_NL_CURRENT (LC_TIME, ABDAY_1 + cnt), ab_weekday_name[cnt])) *decided = loc; break; } } #endif if (*decided != loc && (match_string (weekday_name[cnt], rp) || match_string (ab_weekday_name[cnt], rp))) { *decided = raw; break; } } if (cnt == 7) /* Does not match a weekday name. */ return NULL; tm->tm_wday = cnt; have_wday = 1; break; case 'b': case 'B': case 'h': /* Match month name. */ for (cnt = 0; cnt < 12; ++cnt) { #ifdef _NL_CURRENT if (*decided !=raw) { if (match_string (_NL_CURRENT (LC_TIME, MON_1 + cnt), rp)) { if (*decided == not && strcmp (_NL_CURRENT (LC_TIME, MON_1 + cnt), month_name[cnt])) *decided = loc; break; } if (match_string (_NL_CURRENT (LC_TIME, ABMON_1 + cnt), rp)) { if (*decided == not && strcmp (_NL_CURRENT (LC_TIME, ABMON_1 + cnt), ab_month_name[cnt])) *decided = loc; break; } } #endif if (match_string (month_name[cnt], rp) || match_string (ab_month_name[cnt], rp)) { *decided = raw; break; } } if (cnt == 12) /* Does not match a month name. */ return NULL; tm->tm_mon = cnt; want_xday = 1; break; case 'c': /* Match locale's date and time format. */ #ifdef _NL_CURRENT if (*decided != raw) { if (!recursive (_NL_CURRENT (LC_TIME, D_T_FMT))) { if (*decided == loc) return NULL; else rp = rp_backup; } else { if (*decided == not && strcmp (_NL_CURRENT (LC_TIME, D_T_FMT), HERE_D_T_FMT)) *decided = loc; want_xday = 1; break; } *decided = raw; } #endif if (!recursive (HERE_D_T_FMT)) return NULL; want_xday = 1; break; case 'C': /* Match century number. */ #ifdef _NL_CURRENT match_century: #endif get_number (0, 99, 2); century = val; want_xday = 1; break; case 'd': case 'e': /* Match day of month. 
*/ get_number (1, 31, 2); tm->tm_mday = val; have_mday = 1; want_xday = 1; break; case 'F': if (!recursive ("%Y-%m-%d")) return NULL; want_xday = 1; break; case 'x': #ifdef _NL_CURRENT if (*decided != raw) { if (!recursive (_NL_CURRENT (LC_TIME, D_FMT))) { if (*decided == loc) return NULL; else rp = rp_backup; } else { if (*decided == not && strcmp (_NL_CURRENT (LC_TIME, D_FMT), HERE_D_FMT)) *decided = loc; want_xday = 1; break; } *decided = raw; } #endif FALL_THROUGH; case 'D': /* Match standard day format. */ if (!recursive (HERE_D_FMT)) return NULL; want_xday = 1; break; case 'k': case 'H': /* Match hour in 24-hour clock. */ get_number (0, 23, 2); tm->tm_hour = val; have_I = 0; break; case 'I': /* Match hour in 12-hour clock. */ get_number (1, 12, 2); tm->tm_hour = val % 12; have_I = 1; break; case 'j': /* Match day number of year. */ get_number (1, 366, 3); tm->tm_yday = val - 1; have_yday = 1; break; case 'm': /* Match number of month. */ get_number (1, 12, 2); tm->tm_mon = val - 1; have_mon = 1; want_xday = 1; break; case 'M': /* Match minute. */ get_number (0, 59, 2); tm->tm_min = val; break; case 'n': case 't': /* Match any white space. */ while (isspace (*rp)) ++rp; break; case 'p': /* Match locale's equivalent of AM/PM. */ #ifdef _NL_CURRENT if (*decided != raw) { if (match_string (_NL_CURRENT (LC_TIME, AM_STR), rp)) { if (strcmp (_NL_CURRENT (LC_TIME, AM_STR), HERE_AM_STR)) *decided = loc; break; } if (match_string (_NL_CURRENT (LC_TIME, PM_STR), rp)) { if (strcmp (_NL_CURRENT (LC_TIME, PM_STR), HERE_PM_STR)) *decided = loc; is_pm = 1; break; } *decided = raw; } #endif if (!match_string (HERE_AM_STR, rp)) { if (match_string (HERE_PM_STR, rp)) { is_pm = 1; } else { return NULL; } } break; case 'r': #ifdef _NL_CURRENT if (*decided != raw) { if (!recursive (_NL_CURRENT (LC_TIME, T_FMT_AMPM))) { if (*decided == loc) return NULL; else rp = rp_backup; } else { if (*decided == not && strcmp (_NL_CURRENT (LC_TIME, T_FMT_AMPM), HERE_T_FMT_AMPM)) *decided = loc; break; } *decided = raw; } #endif if (!recursive (HERE_T_FMT_AMPM)) return NULL; break; case 'R': if (!recursive ("%H:%M")) return NULL; break; case 's': { /* The number of seconds may be very high so we cannot use the `get_number' macro. Instead read the number character for character and construct the result while doing this. */ time_t secs = 0; if (*rp < '0' || *rp > '9') /* We need at least one digit. */ return NULL; do { secs *= 10; secs += *rp++ - '0'; } while (*rp >= '0' && *rp <= '9'); if (localtime_r (&secs, tm) == NULL) /* Error in function. */ return NULL; } break; case 'S': get_number (0, 61, 2); tm->tm_sec = val; break; case 'X': #ifdef _NL_CURRENT if (*decided != raw) { if (!recursive (_NL_CURRENT (LC_TIME, T_FMT))) { if (*decided == loc) return NULL; else rp = rp_backup; } else { if (strcmp (_NL_CURRENT (LC_TIME, T_FMT), HERE_T_FMT)) *decided = loc; break; } *decided = raw; } #endif FALL_THROUGH; case 'T': if (!recursive (HERE_T_FMT)) return NULL; break; case 'u': get_number (1, 7, 1); tm->tm_wday = val % 7; have_wday = 1; break; case 'g': get_number (0, 99, 2); /* XXX This cannot determine any field in TM. */ break; case 'G': if (*rp < '0' || *rp > '9') return NULL; /* XXX Ignore the number since we would need some more information to compute a real date. */ do ++rp; while (*rp >= '0' && *rp <= '9'); break; case 'U': case 'V': case 'W': get_number (0, 53, 2); /* XXX This cannot determine any field in TM without some information. */ break; case 'w': /* Match number of weekday. 
*/ get_number (0, 6, 1); tm->tm_wday = val; have_wday = 1; break; case 'y': #ifdef _NL_CURRENT match_year_in_century: #endif /* Match year within century. */ get_number (0, 99, 2); /* The "Year 2000: The Millennium Rollover" paper suggests that values in the range 69-99 refer to the twentieth century. */ tm->tm_year = val >= 69 ? val : val + 100; /* Indicate that we want to use the century, if specified. */ want_century = 1; want_xday = 1; break; case 'Y': /* Match year including century number. */ get_number (0, 9999, 4); tm->tm_year = val - 1900; want_century = 0; want_xday = 1; break; case 'Z': /* XXX How to handle this? */ break; case 'E': #ifdef _NL_CURRENT switch (*fmt++) { case 'c': /* Match locale's alternate date and time format. */ if (*decided != raw) { const char *fmt = _NL_CURRENT (LC_TIME, ERA_D_T_FMT); if (*fmt == '\0') fmt = _NL_CURRENT (LC_TIME, D_T_FMT); if (!recursive (fmt)) { if (*decided == loc) return NULL; else rp = rp_backup; } else { if (strcmp (fmt, HERE_D_T_FMT)) *decided = loc; want_xday = 1; break; } *decided = raw; } /* The C locale has no era information, so use the normal representation. */ if (!recursive (HERE_D_T_FMT)) return NULL; want_xday = 1; break; case 'C': if (*decided != raw) { if (era_cnt >= 0) { era = _nl_select_era_entry (era_cnt); if (match_string (era->era_name, rp)) { *decided = loc; break; } else return NULL; } else { num_eras = _NL_CURRENT_WORD (LC_TIME, _NL_TIME_ERA_NUM_ENTRIES); for (era_cnt = 0; era_cnt < (int) num_eras; ++era_cnt, rp = rp_backup) { era = _nl_select_era_entry (era_cnt); if (match_string (era->era_name, rp)) { *decided = loc; break; } } if (era_cnt == (int) num_eras) { era_cnt = -1; if (*decided == loc) return NULL; } else break; } *decided = raw; } /* The C locale has no era information, so use the normal representation. */ goto match_century; case 'y': if (*decided == raw) goto match_year_in_century; get_number(0, 9999, 4); tm->tm_year = val; want_era = 1; want_xday = 1; break; case 'Y': if (*decided != raw) { num_eras = _NL_CURRENT_WORD (LC_TIME, _NL_TIME_ERA_NUM_ENTRIES); for (era_cnt = 0; era_cnt < (int) num_eras; ++era_cnt, rp = rp_backup) { era = _nl_select_era_entry (era_cnt); if (recursive (era->era_format)) break; } if (era_cnt == (int) num_eras) { era_cnt = -1; if (*decided == loc) return NULL; else rp = rp_backup; } else { *decided = loc; era_cnt = -1; break; } *decided = raw; } get_number (0, 9999, 4); tm->tm_year = val - 1900; want_century = 0; want_xday = 1; break; case 'x': if (*decided != raw) { const char *fmt = _NL_CURRENT (LC_TIME, ERA_D_FMT); if (*fmt == '\0') fmt = _NL_CURRENT (LC_TIME, D_FMT); if (!recursive (fmt)) { if (*decided == loc) return NULL; else rp = rp_backup; } else { if (strcmp (fmt, HERE_D_FMT)) *decided = loc; break; } *decided = raw; } if (!recursive (HERE_D_FMT)) return NULL; break; case 'X': if (*decided != raw) { const char *fmt = _NL_CURRENT (LC_TIME, ERA_T_FMT); if (*fmt == '\0') fmt = _NL_CURRENT (LC_TIME, T_FMT); if (!recursive (fmt)) { if (*decided == loc) return NULL; else rp = rp_backup; } else { if (strcmp (fmt, HERE_T_FMT)) *decided = loc; break; } *decided = raw; } if (!recursive (HERE_T_FMT)) return NULL; break; default: return NULL; } break; #else /* We have no information about the era format. Just use the normal format. */ if (*fmt != 'c' && *fmt != 'C' && *fmt != 'y' && *fmt != 'Y' && *fmt != 'x' && *fmt != 'X') /* This is an illegal format. 
*/ return NULL; goto start_over; #endif case 'O': switch (*fmt++) { case 'd': case 'e': /* Match day of month using alternate numeric symbols. */ get_alt_number (1, 31, 2); tm->tm_mday = val; have_mday = 1; want_xday = 1; break; case 'H': /* Match hour in 24-hour clock using alternate numeric symbols. */ get_alt_number (0, 23, 2); tm->tm_hour = val; have_I = 0; break; case 'I': /* Match hour in 12-hour clock using alternate numeric symbols. */ get_alt_number (1, 12, 2); tm->tm_hour = val - 1; have_I = 1; break; case 'm': /* Match month using alternate numeric symbols. */ get_alt_number (1, 12, 2); tm->tm_mon = val - 1; have_mon = 1; want_xday = 1; break; case 'M': /* Match minutes using alternate numeric symbols. */ get_alt_number (0, 59, 2); tm->tm_min = val; break; case 'S': /* Match seconds using alternate numeric symbols. */ get_alt_number (0, 61, 2); tm->tm_sec = val; break; case 'U': case 'V': case 'W': get_alt_number (0, 53, 2); /* XXX This cannot determine any field in TM without further information. */ break; case 'w': /* Match number of weekday using alternate numeric symbols. */ get_alt_number (0, 6, 1); tm->tm_wday = val; have_wday = 1; break; case 'y': /* Match year within century using alternate numeric symbols. */ get_alt_number (0, 99, 2); tm->tm_year = val >= 69 ? val : val + 100; want_xday = 1; break; default: return NULL; } break; default: return NULL; } } if (have_I && is_pm) tm->tm_hour += 12; if (century != -1) { if (want_century) tm->tm_year = tm->tm_year % 100 + (century - 19) * 100; else /* Only the century, but not the year. Strange, but so be it. */ tm->tm_year = (century - 19) * 100; } #ifdef _NL_CURRENT if (era_cnt != -1) { era = _nl_select_era_entry(era_cnt); if (want_era) tm->tm_year = (era->start_date[0] + ((tm->tm_year - era->offset) * era->absolute_direction)); else /* Era start year assumed. */ tm->tm_year = era->start_date[0]; } else #endif if (want_era) return NULL; if (want_xday && !have_wday) { if ( !(have_mon && have_mday) && have_yday) { /* We don't have tm_mon and/or tm_mday, compute them. */ int t_mon = 0; while (__mon_yday[__isleap(1900 + tm->tm_year)][t_mon] <= tm->tm_yday) t_mon++; if (!have_mon) tm->tm_mon = t_mon - 1; if (!have_mday) tm->tm_mday = (tm->tm_yday - __mon_yday[__isleap(1900 + tm->tm_year)][t_mon - 1] + 1); } day_of_the_week (tm); } if (want_xday && !have_yday) day_of_the_year (tm); return discard_const_p(char, rp); } char *rep_strptime(const char *buf, const char *format, struct tm *tm) { enum locale_status decided; #ifdef _NL_CURRENT decided = not; #else decided = raw; #endif return strptime_internal (buf, format, tm, &decided, -1); } tdb-1.4.2/lib/replace/system/README0000660000000000000000000000036212406075657016625 0ustar rootroot00000000000000This directory contains wrappers around logical groups of system include files. The idea is to avoid #ifdef blocks in the main code, and instead put all the necessary conditional includes in subsystem specific header files in this directory. tdb-1.4.2/lib/replace/system/aio.h0000660000000000000000000000201412406075657016662 0ustar rootroot00000000000000#ifndef _system_aio_h #define _system_aio_h /* Unix SMB/CIFS implementation. AIO system include wrappers Copyright (C) Andrew Tridgell 2006 ** NOTE! The following LGPL license applies to the replace ** library. 
This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef HAVE_LIBAIO_H #include #endif #endif tdb-1.4.2/lib/replace/system/capability.h0000660000000000000000000000322613444661620020233 0ustar rootroot00000000000000#ifndef _system_capability_h #define _system_capability_h /* Unix SMB/CIFS implementation. capability system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef HAVE_SYS_CAPABILITY_H #if defined(BROKEN_REDHAT_7_SYSTEM_HEADERS) && !defined(_I386_STATFS_H) && !defined(_PPC_STATFS_H) #define _I386_STATFS_H #define _PPC_STATFS_H #define BROKEN_REDHAT_7_STATFS_WORKAROUND #endif #if defined(BROKEN_RHEL5_SYS_CAP_HEADER) && !defined(_LINUX_TYPES_H) #define BROKEN_RHEL5_SYS_CAP_HEADER_WORKAROUND #endif #ifdef HAVE_POSIX_CAPABILITIES #include #endif #ifdef BROKEN_RHEL5_SYS_CAP_HEADER_WORKAROUND #undef _LINUX_TYPES_H #undef BROKEN_RHEL5_SYS_CAP_HEADER_WORKAROUND #endif #ifdef BROKEN_REDHAT_7_STATFS_WORKAROUND #undef _PPC_STATFS_H #undef _I386_STATFS_H #undef BROKEN_REDHAT_7_STATFS_WORKAROUND #endif #endif #endif tdb-1.4.2/lib/replace/system/dir.h0000660000000000000000000000352513444661620016672 0ustar rootroot00000000000000#ifndef _system_dir_h #define _system_dir_h /* Unix SMB/CIFS implementation. directory system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
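
   (Editorial aside, not part of the license text: the ISDOT() and
   ISDOTDOT() helpers defined below let directory walks skip the "."
   and ".." entries without calling strcmp() twice per entry, e.g.

       if (ISDOT(de->d_name) || ISDOTDOT(de->d_name))
           continue;   /* de is a hypothetical struct dirent pointer */
   )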
#ifdef HAVE_DIRENT_H
# include <dirent.h>
# define NAMLEN(dirent) strlen((dirent)->d_name)
#else
# define dirent direct
# define NAMLEN(dirent) (dirent)->d_namlen
# if HAVE_SYS_NDIR_H
#  include <sys/ndir.h>
# endif
# if HAVE_SYS_DIR_H
#  include <sys/dir.h>
# endif
# if HAVE_NDIR_H
#  include <ndir.h>
# endif
#endif

#ifndef HAVE_MKDIR_MODE
#define mkdir(dir, mode) mkdir(dir)
#endif

#ifdef HAVE_LIBGEN_H
# include <libgen.h>
#endif

/* Test whether a file name is the "." or ".." directory entries.
 * These really should be inline functions.
 */
#ifndef ISDOT
#define ISDOT(path) ( \
			*((const char *)(path)) == '.' && \
			*(((const char *)(path)) + 1) == '\0' \
		)
#endif

#ifndef ISDOTDOT
#define ISDOTDOT(path) ( \
			*((const char *)(path)) == '.' && \
			*(((const char *)(path)) + 1) == '.' && \
			*(((const char *)(path)) + 2) == '\0' \
		)
#endif

#endif
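
The ISDOT()/ISDOTDOT() and NAMLEN() macros above are easiest to understand in
context, so here is a minimal sketch (hypothetical, not shipped with the
library) of a directory scan written against system/dir.h. It behaves the
same whether the platform has <dirent.h> or one of the older ndir variants;
the function name list_dir is my invention.

/* Hypothetical example: enumerate a directory, skipping "." and "..". */
#include "replace.h"
#include "system/dir.h"

static int list_dir(const char *path)
{
	DIR *d = opendir(path);
	struct dirent *de;

	if (d == NULL) {
		return -1;
	}
	while ((de = readdir(d)) != NULL) {
		/* ISDOT()/ISDOTDOT() test for the "." and ".." entries. */
		if (ISDOT(de->d_name) || ISDOTDOT(de->d_name)) {
			continue;
		}
		/* NAMLEN() abstracts the name length across dirent flavours. */
		printf("%.*s\n", (int)NAMLEN(de), de->d_name);
	}
	closedir(d);
	return 0;
}
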
tdb-1.4.2/lib/replace/system/filesys.h0000660000000000000000000001360313444661620017570 0ustar rootroot00000000000000
#ifndef _system_filesys_h
#define _system_filesys_h
/*
   Unix SMB/CIFS implementation.

   filesystem system include wrappers

   Copyright (C) Andrew Tridgell 2004

     ** NOTE! The following LGPL license applies to the replace
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#include <sys/stat.h>

#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif

#ifdef HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#endif

#ifdef HAVE_MNTENT_H
#include <mntent.h>
#endif

#ifdef HAVE_SYS_VFS_H
#include <sys/vfs.h>
#endif

#ifdef HAVE_SYS_ACL_H
#include <sys/acl.h>
#endif

#ifdef HAVE_ACL_LIBACL_H
#include <acl/libacl.h>
#endif

#ifdef HAVE_SYS_FS_S5PARAM_H
#include <sys/fs/s5param.h>
#endif

#if defined (HAVE_SYS_FILSYS_H) && !defined (_CRAY)
#include <sys/filsys.h>
#endif

#ifdef HAVE_SYS_STATFS_H
# include <sys/statfs.h>
#endif

#ifdef HAVE_DUSTAT_H
#include <sys/dustat.h>
#endif

#ifdef HAVE_SYS_STATVFS_H
#include <sys/statvfs.h>
#endif

#ifdef HAVE_SYS_FILIO_H
#include <sys/filio.h>
#endif

#ifdef HAVE_SYS_FILE_H
#include <sys/file.h>
#endif

#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#else
#ifdef HAVE_SYS_FCNTL_H
#include <sys/fcntl.h>
#endif
#endif

#ifdef HAVE_SYS_MODE_H
/* apparently AIX needs this for S_ISLNK */
#ifndef S_ISLNK
#include <sys/mode.h>
#endif
#endif

#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif

#ifdef HAVE_SYS_UIO_H
#include <sys/uio.h>
#endif

#if defined(HAVE_SYS_ATTRIBUTES_H)
#include <sys/attributes.h>
#elif defined(HAVE_ATTR_ATTRIBUTES_H)
#include <attr/attributes.h>
#endif

/* mutually exclusive (SuSE 8.2) */
#if defined(HAVE_ATTR_XATTR_H)
#include <attr/xattr.h>
#elif defined(HAVE_SYS_XATTR_H)
#include <sys/xattr.h>
#endif

#ifdef HAVE_SYS_EA_H
#include <sys/ea.h>
#endif

#ifdef HAVE_SYS_EXTATTR_H
#include <sys/extattr.h>
#endif

#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif

#ifndef XATTR_CREATE
#define XATTR_CREATE  0x1 /* set value, fail if attr already exists */
#endif

#ifndef XATTR_REPLACE
#define XATTR_REPLACE 0x2 /* set value, fail if attr does not exist */
#endif

/* Some POSIX definitions for those without */

#ifndef S_IFDIR
#define S_IFDIR  0x4000
#endif
#ifndef S_ISDIR
#define S_ISDIR(mode)   ((mode & 0xF000) == S_IFDIR)
#endif
#ifndef S_IRWXU
#define S_IRWXU 00700           /* read, write, execute: owner */
#endif
#ifndef S_IRUSR
#define S_IRUSR 00400           /* read permission: owner */
#endif
#ifndef S_IWUSR
#define S_IWUSR 00200           /* write permission: owner */
#endif
#ifndef S_IXUSR
#define S_IXUSR 00100           /* execute permission: owner */
#endif
#ifndef S_IRWXG
#define S_IRWXG 00070           /* read, write, execute: group */
#endif
#ifndef S_IRGRP
#define S_IRGRP 00040           /* read permission: group */
#endif
#ifndef S_IWGRP
#define S_IWGRP 00020           /* write permission: group */
#endif
#ifndef S_IXGRP
#define S_IXGRP 00010           /* execute permission: group */
#endif
#ifndef S_IRWXO
#define S_IRWXO 00007           /* read, write, execute: other */
#endif
#ifndef S_IROTH
#define S_IROTH 00004           /* read permission: other */
#endif
#ifndef S_IWOTH
#define S_IWOTH 00002           /* write permission: other */
#endif
#ifndef S_IXOTH
#define S_IXOTH 00001           /* execute permission: other */
#endif

#ifndef O_ACCMODE
#define O_ACCMODE (O_RDONLY | O_WRONLY | O_RDWR)
#endif

#ifndef MAXPATHLEN
#define MAXPATHLEN 256
#endif

#ifndef SEEK_SET
#define SEEK_SET 0
#endif

#ifdef _WIN32
#define mkdir(d,m) _mkdir(d)
#endif

/* this allows us to use a uniform error handling for our xattr wrappers */
#ifndef ENOATTR
#define ENOATTR ENODATA
#endif

#if !defined(HAVE_XATTR_XATTR) || defined(XATTR_ADDITIONAL_OPTIONS)

ssize_t rep_getxattr (const char *path, const char *name, void *value, size_t size);
#define getxattr(path, name, value, size) rep_getxattr(path, name, value, size)
/* define is in "replace.h" */
ssize_t rep_fgetxattr (int filedes, const char *name, void *value, size_t size);
#define fgetxattr(filedes, name, value, size) rep_fgetxattr(filedes, name, value, size)
/* define is in "replace.h" */
ssize_t rep_listxattr (const char *path, char *list, size_t size);
#define listxattr(path, list, size) rep_listxattr(path, list, size)
/* define is in "replace.h" */
ssize_t rep_flistxattr (int filedes, char *list, size_t size);
#define flistxattr(filedes, value, size) rep_flistxattr(filedes, value, size)
/* define is in "replace.h" */
int rep_removexattr (const char *path, const char *name);
#define removexattr(path, name) rep_removexattr(path, name)
/* define is in "replace.h" */
int rep_fremovexattr (int filedes, const char *name);
#define fremovexattr(filedes, name) rep_fremovexattr(filedes, name)
/* define is in "replace.h" */
int rep_setxattr (const char *path, const char *name, const void *value, size_t size, int flags);
#define setxattr(path, name, value, size, flags) rep_setxattr(path, name, value, size, flags)
/* define is in "replace.h" */
int rep_fsetxattr (int filedes, const char *name, const void *value, size_t size, int flags);
#define fsetxattr(filedes, name, value, size, flags) rep_fsetxattr(filedes, name, value, size, flags)
/* define is in "replace.h" */

#endif /* !defined(HAVE_XATTR_XATTR) || defined(XATTR_ADDITIONAL_OPTIONS) */

#endif
tdb-1.4.2/lib/replace/system/glob.h0000660000000000000000000000207712406075657017046 0ustar rootroot00000000000000
#ifndef _system_glob_h
#define _system_glob_h
/*
   Unix SMB/CIFS implementation.

   glob system include wrappers

   Copyright (C) Andrew Tridgell 2004

     ** NOTE! The following LGPL license applies to the replace
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#ifdef HAVE_GLOB_H
#include <glob.h>
#endif

#ifdef HAVE_FNMATCH_H
#include <fnmatch.h>
#endif

#endif
tdb-1.4.2/lib/replace/system/gssapi.h0000660000000000000000000000272513444661620017403 0ustar rootroot00000000000000
#ifndef _system_gssapi_h
#define _system_gssapi_h
/*
   Unix SMB/CIFS implementation.

   GSSAPI system include wrappers

   Copyright (C) Andrew Tridgell 2004

     ** NOTE! The following LGPL license applies to the replace
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#ifdef HAVE_GSSAPI

#ifdef HAVE_GSSAPI_GSSAPI_EXT_H
#include <gssapi/gssapi_ext.h>
#elif defined(HAVE_GSSAPI_GSSAPI_H)
#include <gssapi/gssapi.h>
#elif defined(HAVE_GSSAPI_GSSAPI_GENERIC_H)
#include <gssapi/gssapi_generic.h>
#elif defined(HAVE_GSSAPI_H)
#include <gssapi.h>
#endif

#ifdef HAVE_GSSAPI_GSSAPI_KRB5_H
#include <gssapi/gssapi_krb5.h>
#endif

#ifdef HAVE_GSSAPI_GSSAPI_SPNEGO_H
#include <gssapi/gssapi_spnego.h>
#elif defined(HAVE_GSSAPI_SPNEGO_H)
#include <gssapi_spnego.h>
#endif

#endif
#endif
tdb-1.4.2/lib/replace/system/iconv.h0000660000000000000000000000304612406075657017236 0ustar rootroot00000000000000
#ifndef _system_iconv_h
#define _system_iconv_h
/*
   Unix SMB/CIFS implementation.
iconv memory system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #if !defined(HAVE_ICONV) && defined(HAVE_ICONV_H) #define HAVE_ICONV #endif #if !defined(HAVE_GICONV) && defined(HAVE_GICONV_H) #define HAVE_GICONV #endif #if !defined(HAVE_BICONV) && defined(HAVE_BICONV_H) #define HAVE_BICONV #endif #ifdef HAVE_NATIVE_ICONV #if defined(HAVE_ICONV) #include #elif defined(HAVE_GICONV) #include #elif defined(HAVE_BICONV) #include #endif #endif /* HAVE_NATIVE_ICONV */ /* needed for some systems without iconv. Doesn't really matter what error code we use */ #ifndef EILSEQ #define EILSEQ EIO #endif #endif tdb-1.4.2/lib/replace/system/kerberos.h0000660000000000000000000000213713444661620017726 0ustar rootroot00000000000000#ifndef _system_kerberos_h #define _system_kerberos_h /* Unix SMB/CIFS implementation. kerberos system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef HAVE_KRB5 #ifdef HAVE_KRB5_H #include #endif #ifdef HAVE_COM_ERR_H #include #endif #endif #endif tdb-1.4.2/lib/replace/system/locale.h0000660000000000000000000000216412406075657017357 0ustar rootroot00000000000000#ifndef _system_locale_h #define _system_locale_h /* Unix SMB/CIFS implementation. locale include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/ #ifdef HAVE_CTYPE_H #include #endif #ifdef HAVE_LOCALE_H #include #endif #ifdef HAVE_LANGINFO_H #include #endif #endif tdb-1.4.2/lib/replace/system/network.h0000660000000000000000000001722113055076237017605 0ustar rootroot00000000000000#ifndef _system_network_h #define _system_network_h /* Unix SMB/CIFS implementation. networking system include wrappers Copyright (C) Andrew Tridgell 2004 Copyright (C) Jelmer Vernooij 2007 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifndef LIBREPLACE_NETWORK_CHECKS #error "AC_LIBREPLACE_NETWORK_CHECKS missing in configure" #endif #ifdef HAVE_UNISTD_H #include #endif #ifdef HAVE_SYS_SOCKET_H #include #endif #ifdef HAVE_UNIXSOCKET #include #endif #ifdef HAVE_NETINET_IN_H #include #endif #ifdef HAVE_ARPA_INET_H #include #endif #ifdef HAVE_NETDB_H #include #endif #ifdef HAVE_NETINET_TCP_H #include #endif /* * The next three defines are needed to access the IPTOS_* options * on some systems. */ #ifdef HAVE_NETINET_IN_SYSTM_H #include #endif #ifdef HAVE_NETINET_IN_IP_H #include #endif #ifdef HAVE_NETINET_IP_H #include #endif #ifdef HAVE_NET_IF_H #include #endif #ifdef HAVE_SYS_IOCTL_H #include #endif #ifdef HAVE_SYS_UIO_H #include #endif #ifdef HAVE_STROPTS_H #include #endif #ifndef HAVE_SOCKLEN_T #define HAVE_SOCKLEN_T typedef int socklen_t; #endif #if !defined (HAVE_INET_NTOA) || defined(REPLACE_INET_NTOA) /* define is in "replace.h" */ char *rep_inet_ntoa(struct in_addr ip); #endif #ifndef HAVE_INET_PTON /* define is in "replace.h" */ int rep_inet_pton(int af, const char *src, void *dst); #endif #ifndef HAVE_INET_NTOP /* define is in "replace.h" */ const char *rep_inet_ntop(int af, const void *src, char *dst, socklen_t size); #endif #ifndef HAVE_INET_ATON /* define is in "replace.h" */ int rep_inet_aton(const char *src, struct in_addr *dst); #endif #ifndef HAVE_CONNECT /* define is in "replace.h" */ int rep_connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen); #endif #ifndef HAVE_GETHOSTBYNAME /* define is in "replace.h" */ struct hostent *rep_gethostbyname(const char *name); #endif #ifdef HAVE_IFADDRS_H #include #endif #ifndef HAVE_STRUCT_IFADDRS struct ifaddrs { struct ifaddrs *ifa_next; /* Pointer to next struct */ char *ifa_name; /* Interface name */ unsigned int ifa_flags; /* Interface flags */ struct sockaddr *ifa_addr; /* Interface address */ struct sockaddr *ifa_netmask; /* Interface netmask */ #undef ifa_dstaddr struct sockaddr *ifa_dstaddr; /* P2P interface destination */ void *ifa_data; /* Address specific data */ }; #endif #ifndef HAVE_GETIFADDRS int rep_getifaddrs(struct ifaddrs **); #endif #ifndef HAVE_FREEIFADDRS void rep_freeifaddrs(struct ifaddrs *); #endif #ifndef HAVE_SOCKETPAIR /* define is in "replace.h" */ int rep_socketpair(int d, int type, int protocol, int sv[2]); #endif /* * Some systems have getaddrinfo but not the * defines needed 
to use it. */ /* Various macros that ought to be in , but might not be */ #ifndef EAI_FAIL #define EAI_BADFLAGS (-1) #define EAI_NONAME (-2) #define EAI_AGAIN (-3) #define EAI_FAIL (-4) #define EAI_FAMILY (-6) #define EAI_SOCKTYPE (-7) #define EAI_SERVICE (-8) #define EAI_MEMORY (-10) #define EAI_SYSTEM (-11) #endif /* !EAI_FAIL */ #ifndef AI_PASSIVE #define AI_PASSIVE 0x0001 #endif #ifndef AI_CANONNAME #define AI_CANONNAME 0x0002 #endif #ifndef AI_NUMERICHOST /* * some platforms don't support AI_NUMERICHOST; define as zero if using * the system version of getaddrinfo... */ #if defined(HAVE_STRUCT_ADDRINFO) && defined(HAVE_GETADDRINFO) #define AI_NUMERICHOST 0 #else #define AI_NUMERICHOST 0x0004 #endif #endif /* * Some of the functions in source3/lib/util_sock.c use AI_ADDRCONFIG. On QNX * 6.3.0, this macro is defined but, if it's used, getaddrinfo will fail. This * prevents smbd from opening any sockets. * * If I undefine AI_ADDRCONFIG on such systems and define it to be 0, * this works around the issue. */ #ifdef __QNX__ #include #if _NTO_VERSION == 630 #undef AI_ADDRCONFIG #endif #endif #ifndef AI_ADDRCONFIG /* * logic copied from AI_NUMERICHOST */ #if defined(HAVE_STRUCT_ADDRINFO) && defined(HAVE_GETADDRINFO) #define AI_ADDRCONFIG 0 #else #define AI_ADDRCONFIG 0x0020 #endif #endif #ifndef AI_NUMERICSERV /* * logic copied from AI_NUMERICHOST */ #if defined(HAVE_STRUCT_ADDRINFO) && defined(HAVE_GETADDRINFO) #define AI_NUMERICSERV 0 #else #define AI_NUMERICSERV 0x0400 #endif #endif #ifndef NI_NUMERICHOST #define NI_NUMERICHOST 1 #endif #ifndef NI_NUMERICSERV #define NI_NUMERICSERV 2 #endif #ifndef NI_NOFQDN #define NI_NOFQDN 4 #endif #ifndef NI_NAMEREQD #define NI_NAMEREQD 8 #endif #ifndef NI_DGRAM #define NI_DGRAM 16 #endif #ifndef NI_MAXHOST #define NI_MAXHOST 1025 #endif #ifndef NI_MAXSERV #define NI_MAXSERV 32 #endif /* * glibc on linux doesn't seem to have MSG_WAITALL * defined. I think the kernel has it though.. 
*/ #ifndef MSG_WAITALL #define MSG_WAITALL 0 #endif #ifndef INADDR_LOOPBACK #define INADDR_LOOPBACK 0x7f000001 #endif #ifndef INADDR_NONE #define INADDR_NONE 0xffffffff #endif #ifndef EAFNOSUPPORT #define EAFNOSUPPORT EINVAL #endif #ifndef INET_ADDRSTRLEN #define INET_ADDRSTRLEN 16 #endif #ifndef INET6_ADDRSTRLEN #define INET6_ADDRSTRLEN 46 #endif #ifndef HOST_NAME_MAX #define HOST_NAME_MAX 255 #endif #ifndef MAXHOSTNAMELEN #define MAXHOSTNAMELEN HOST_NAME_MAX #endif #ifndef HAVE_SA_FAMILY_T #define HAVE_SA_FAMILY_T typedef unsigned short int sa_family_t; #endif #ifndef HAVE_STRUCT_SOCKADDR_STORAGE #define HAVE_STRUCT_SOCKADDR_STORAGE #ifdef HAVE_STRUCT_SOCKADDR_IN6 #define sockaddr_storage sockaddr_in6 #define ss_family sin6_family #define HAVE_SS_FAMILY 1 #else /*HAVE_STRUCT_SOCKADDR_IN6*/ #define sockaddr_storage sockaddr_in #define ss_family sin_family #define HAVE_SS_FAMILY 1 #endif /*HAVE_STRUCT_SOCKADDR_IN6*/ #endif /*HAVE_STRUCT_SOCKADDR_STORAGE*/ #ifndef HAVE_SS_FAMILY #ifdef HAVE___SS_FAMILY #define ss_family __ss_family #define HAVE_SS_FAMILY 1 #endif #endif #ifndef IOV_MAX # ifdef UIO_MAXIOV # define IOV_MAX UIO_MAXIOV # else # ifdef __sgi /* * IRIX 6.5 has sysconf(_SC_IOV_MAX) * which might return 512 or bigger */ # define IOV_MAX 512 # endif # endif #endif #ifndef HAVE_STRUCT_ADDRINFO #define HAVE_STRUCT_ADDRINFO struct addrinfo { int ai_flags; int ai_family; int ai_socktype; int ai_protocol; socklen_t ai_addrlen; struct sockaddr *ai_addr; char *ai_canonname; struct addrinfo *ai_next; }; #endif /* HAVE_STRUCT_ADDRINFO */ #if !defined(HAVE_GETADDRINFO) #include "getaddrinfo.h" #endif /* Needed for some systems that don't define it (Solaris). */ #ifndef ifr_netmask #define ifr_netmask ifr_addr #endif /* Some old Linux systems have broken header files */ #ifdef HAVE_IPV6 #ifdef HAVE_LINUX_IPV6_V6ONLY_26 #define IPV6_V6ONLY 26 #endif /* HAVE_LINUX_IPV6_V6ONLY_26 */ #endif /* HAVE_IPV6 */ #ifndef SCOPE_DELIMITER #define SCOPE_DELIMITER '%' #endif #endif tdb-1.4.2/lib/replace/system/nis.h0000660000000000000000000000303513527011454016675 0ustar rootroot00000000000000/* Unix SMB/CIFS implementation. nis system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifndef _nis_passwd_h #define _nis_passwd_h #if defined(HAVE_RPC_RPC_H) /* * Check for AUTH_ERROR define conflict with rpc/rpc.h in prot.h. 
*/ #if defined(HAVE_SYS_SECURITY_H) && defined(HAVE_RPC_AUTH_ERROR_CONFLICT) #undef AUTH_ERROR #endif /* HAVE_SYS_SECURITY_H && HAVE_RPC_AUTH_ERROR_CONFLICT */ #include #endif /* HAVE_RPC_RPC_H */ #if defined (HAVE_NETGROUP) #if defined(HAVE_RPCSVC_YP_PROT_H) #include #endif /* HAVE_RPCSVC_YP_PROT_H */ #if defined(HAVE_RPCSVC_YPCLNT_H) #include #endif /* HAVE_RPCSVC_YPCLNT_H */ #endif /* HAVE_NETGROUP */ #endif /* _nis_passwd_h */ tdb-1.4.2/lib/replace/system/passwd.h0000660000000000000000000000404212412743715017410 0ustar rootroot00000000000000#ifndef _system_passwd_h #define _system_passwd_h /* Unix SMB/CIFS implementation. passwd system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef HAVE_UNISTD_H #include #endif #ifdef HAVE_PWD_H #include #endif #ifdef HAVE_GRP_H #include #endif #ifdef HAVE_SYS_PRIV_H #include #endif #ifdef HAVE_SYS_ID_H #include #endif #ifdef HAVE_CRYPT_H #include #endif #ifdef HAVE_SHADOW_H #include #endif #ifdef HAVE_SYS_SECURITY_H #include #include #define PASSWORD_LENGTH 16 #endif /* HAVE_SYS_SECURITY_H */ #ifdef HAVE_GETPWANAM #include #include #include #endif #ifdef HAVE_COMPAT_H #include #endif #ifndef NGROUPS_MAX #define NGROUPS_MAX 32 /* Guess... */ #endif /* what is the longest significant password available on your system? Knowing this speeds up password searches a lot */ #ifndef PASSWORD_LENGTH #define PASSWORD_LENGTH 8 #endif #ifndef ALLOW_CHANGE_PASSWORD #if (defined(HAVE_TERMIOS_H) && defined(HAVE_DUP2) && defined(HAVE_SETSID)) #define ALLOW_CHANGE_PASSWORD 1 #endif #endif #if defined(HAVE_CRYPT16) && defined(HAVE_GETAUTHUID) #define ULTRIX_AUTH 1 #endif #endif tdb-1.4.2/lib/replace/system/readline.h0000660000000000000000000000335013444661620017673 0ustar rootroot00000000000000#ifndef _system_readline_h #define _system_readline_h /* Unix SMB/CIFS implementation. Readline wrappers ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . 
*/

#ifdef HAVE_LIBREADLINE
#  ifdef HAVE_READLINE_READLINE_H
#    ifdef HAVE_READLINE_READLINE_WORKAROUND
#      define _FUNCTION_DEF
#    endif
#    include <readline/readline.h>
#    ifdef HAVE_READLINE_HISTORY_H
#      include <readline/history.h>
#    endif
#  else
#    ifdef HAVE_READLINE_H
#      include <readline.h>
#      ifdef HAVE_HISTORY_H
#        include <history.h>
#      endif
#    else
#      undef HAVE_LIBREADLINE
#    endif
#  endif
#endif

#ifdef HAVE_NEW_LIBREADLINE
#ifdef HAVE_CPPFUNCTION
# define RL_COMPLETION_CAST (CPPFunction *)
#elif defined(HAVE_RL_COMPLETION_T)
# define RL_COMPLETION_CAST (rl_completion_t *)
#else
# define RL_COMPLETION_CAST
#endif
#else
/* This type is missing from libreadline<4.0 (approximately) */
# define RL_COMPLETION_CAST
#endif /* HAVE_NEW_LIBREADLINE */

#endif
tdb-1.4.2/lib/replace/system/select.h0000660000000000000000000000462212474026560017372 0ustar rootroot00000000000000
#ifndef _system_select_h
#define _system_select_h
/*
   Unix SMB/CIFS implementation.

   select system include wrappers

   Copyright (C) Andrew Tridgell 2004

     ** NOTE! The following LGPL license applies to the replace
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif

#ifdef HAVE_SYS_EPOLL_H
#include <sys/epoll.h>
#endif

#ifdef HAVE_SOLARIS_PORTS
#include <port.h>
#endif

#ifndef SELECT_CAST
#define SELECT_CAST
#endif

#ifdef HAVE_POLL

#include <poll.h>

#else

/* Type used for the number of file descriptors. */
typedef unsigned long int nfds_t;

/* Data structure describing a polling request. */
struct pollfd {
	int fd;            /* File descriptor to poll. */
	short int events;  /* Types of events poller cares about. */
	short int revents; /* Types of events that actually occurred. */
};

/* Event types that can be polled for.  These bits may be set in `events'
   to indicate the interesting event types; they will appear in `revents'
   to indicate the status of the file descriptor. */
#define POLLIN     0x001 /* There is data to read. */
#define POLLPRI    0x002 /* There is urgent data to read. */
#define POLLOUT    0x004 /* Writing now will not block. */
#define POLLRDNORM 0x040 /* Normal data may be read. */
#define POLLRDBAND 0x080 /* Priority data may be read. */
#define POLLWRNORM 0x100 /* Writing now will not block. */
#define POLLWRBAND 0x200 /* Priority data may be written. */
#define POLLERR    0x008 /* Error condition. */
#define POLLHUP    0x010 /* Hung up. */
#define POLLNVAL   0x020 /* Invalid polling request. */

/* define is in "replace.h" */
int rep_poll(struct pollfd *fds, nfds_t nfds, int timeout);

#endif

#endif
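
Since select.h supplies a struct pollfd and maps poll() to rep_poll() on
platforms without a native poll(2), callers can be written once against the
poll interface. The sketch below is a hypothetical example of such a caller
(wait_readable() is invented for illustration and is not part of the
library).

/* Hypothetical example: wait until fd is readable or a timeout expires. */
#include "replace.h"
#include "system/select.h"

static int wait_readable(int fd, int timeout_ms)
{
	struct pollfd pfd;
	int ret;

	pfd.fd = fd;
	pfd.events = POLLIN;
	pfd.revents = 0;

	/* Native poll(2) where available, rep_poll() otherwise. */
	ret = poll(&pfd, 1, timeout_ms);
	if (ret <= 0) {
		return ret;	/* 0 on timeout, -1 on error */
	}
	return (pfd.revents & POLLIN) ? 1 : 0;
}
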
tdb-1.4.2/lib/replace/system/shmem.h0000660000000000000000000000262512406075657017231 0ustar rootroot00000000000000
#ifndef _system_shmem_h
#define _system_shmem_h
/*
   Unix SMB/CIFS implementation.

   shared memory system include wrappers

   Copyright (C) Andrew Tridgell 2004

     ** NOTE! The following LGPL license applies to the replace
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#if defined(HAVE_SYS_IPC_H)
#include <sys/ipc.h>
#endif /* HAVE_SYS_IPC_H */

#if defined(HAVE_SYS_SHM_H)
#include <sys/shm.h>
#endif /* HAVE_SYS_SHM_H */

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

/* NetBSD doesn't have these */
#ifndef SHM_R
#define SHM_R 0400
#endif

#ifndef SHM_W
#define SHM_W 0200
#endif

#ifndef MAP_FILE
#define MAP_FILE 0
#endif

#ifndef MAP_FAILED
#define MAP_FAILED ((void *)-1)
#endif

#endif
tdb-1.4.2/lib/replace/system/syslog.h0000660000000000000000000000343512406075657017442 0ustar rootroot00000000000000
#ifndef _system_syslog_h
#define _system_syslog_h
/*
   Unix SMB/CIFS implementation.

   syslog system include wrappers

   Copyright (C) Andrew Tridgell 2004

     ** NOTE! The following LGPL license applies to the replace
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#ifdef HAVE_SYSLOG_H
#include <syslog.h>
#else
#ifdef HAVE_SYS_SYSLOG_H
#include <sys/syslog.h>
#endif
#endif

/* For sys_adminlog(). */
#ifndef LOG_EMERG
#define LOG_EMERG       0       /* system is unusable */
#endif

#ifndef LOG_ALERT
#define LOG_ALERT       1       /* action must be taken immediately */
#endif

#ifndef LOG_CRIT
#define LOG_CRIT        2       /* critical conditions */
#endif

#ifndef LOG_ERR
#define LOG_ERR         3       /* error conditions */
#endif

#ifndef LOG_WARNING
#define LOG_WARNING     4       /* warning conditions */
#endif

#ifndef LOG_NOTICE
#define LOG_NOTICE      5       /* normal but significant condition */
#endif

#ifndef LOG_INFO
#define LOG_INFO        6       /* informational */
#endif

#ifndef LOG_DEBUG
#define LOG_DEBUG       7       /* debug-level messages */
#endif

#endif
tdb-1.4.2/lib/replace/system/terminal.h0000660000000000000000000000262512406075657017733 0ustar rootroot00000000000000
#ifndef _system_terminal_h
#define _system_terminal_h
/*
   Unix SMB/CIFS implementation.

   terminal system include wrappers

   Copyright (C) Andrew Tridgell 2004

     ** NOTE! The following LGPL license applies to the replace
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #ifdef SUNOS4 /* on SUNOS4 termios.h conflicts with sys/ioctl.h */ #undef HAVE_TERMIOS_H #endif #if defined(HAVE_TERMIOS_H) /* POSIX terminal handling. */ #include #elif defined(HAVE_TERMIO_H) /* Older SYSV terminal handling - don't use if we can avoid it. */ #include #elif defined(HAVE_SYS_TERMIO_H) /* Older SYSV terminal handling - don't use if we can avoid it. */ #include #endif #endif tdb-1.4.2/lib/replace/system/threads.h0000660000000000000000000000465313444661620017551 0ustar rootroot00000000000000#ifndef _system_threads_h #define _system_threads_h /* Unix SMB/CIFS implementation. macros to go along with the lib/replace/ portability layer code Copyright (C) Volker Lendecke 2012 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include #if defined(HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP) && \ !defined(HAVE_PTHREAD_MUTEXATTR_SETROBUST) #define pthread_mutexattr_setrobust pthread_mutexattr_setrobust_np #endif #if defined(HAVE_DECL_PTHREAD_MUTEX_ROBUST_NP) && \ !defined(HAVE_DECL_PTHREAD_MUTEX_ROBUST) #define PTHREAD_MUTEX_ROBUST PTHREAD_MUTEX_ROBUST_NP #endif #if defined(HAVE_PTHREAD_MUTEX_CONSISTENT_NP) && \ !defined(HAVE_PTHREAD_MUTEX_CONSISTENT) #define pthread_mutex_consistent pthread_mutex_consistent_np #endif #ifdef HAVE_STDATOMIC_H #include #endif #ifndef HAVE_ATOMIC_THREAD_FENCE #ifdef HAVE___ATOMIC_THREAD_FENCE #define atomic_thread_fence(__ignore_order) __atomic_thread_fence(__ATOMIC_SEQ_CST) #define HAVE_ATOMIC_THREAD_FENCE 1 #endif /* HAVE___ATOMIC_THREAD_FENCE */ #endif /* not HAVE_ATOMIC_THREAD_FENCE */ #ifndef HAVE_ATOMIC_THREAD_FENCE #ifdef HAVE___SYNC_SYNCHRONIZE #define atomic_thread_fence(__ignore_order) __sync_synchronize() #define HAVE_ATOMIC_THREAD_FENCE 1 #endif /* HAVE___SYNC_SYNCHRONIZE */ #endif /* not HAVE_ATOMIC_THREAD_FENCE */ #ifndef HAVE_ATOMIC_THREAD_FENCE #ifdef HAVE_ATOMIC_THREAD_FENCE_SUPPORT #error mismatch_error_between_configure_test_and_header #endif /* make sure the build fails if someone uses it without checking the define */ #define atomic_thread_fence(__order) \ __function__atomic_thread_fence_not_available_on_this_platform__() #endif /* not HAVE_ATOMIC_THREAD_FENCE */ #endif tdb-1.4.2/lib/replace/system/time.h0000660000000000000000000000523713120574744017055 0ustar rootroot00000000000000#ifndef _system_time_h #define _system_time_h /* Unix SMB/CIFS implementation. time system include wrappers Copyright (C) Andrew Tridgell 2004 ** NOTE! The following LGPL license applies to the replace ** library. 
This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#ifdef TIME_WITH_SYS_TIME
#include <sys/time.h>
#include <time.h>
#else
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <time.h>
#endif
#endif

#ifdef HAVE_UTIME_H
#include <utime.h>
#else
struct utimbuf {
	time_t actime;  /* access time */
	time_t modtime; /* modification time */
};
#endif

#ifndef HAVE_STRUCT_TIMESPEC
struct timespec {
	time_t tv_sec;  /* Seconds. */
	long tv_nsec;   /* Nanoseconds. */
};
#endif

#ifndef HAVE_MKTIME
/* define is in "replace.h" */
time_t rep_mktime(struct tm *t);
#endif

#ifndef HAVE_TIMEGM
/* define is in "replace.h" */
time_t rep_timegm(struct tm *tm);
#endif

#ifndef HAVE_UTIME
/* define is in "replace.h" */
int rep_utime(const char *filename, const struct utimbuf *buf);
#endif

#ifndef HAVE_UTIMES
/* define is in "replace.h" */
int rep_utimes(const char *filename, const struct timeval tv[2]);
#endif

#ifndef HAVE_CLOCK_GETTIME
/* CLOCK_REALTIME is required by POSIX */
#define CLOCK_REALTIME 0
typedef int clockid_t;
int rep_clock_gettime(clockid_t clk_id, struct timespec *tp);
#endif

/* make sure we have a best effort CUSTOM_CLOCK_MONOTONIC we can rely on.
 *
 * on AIX the values of CLOCK_* are cast expressions, not integer constants,
 * this prevents them from being compared against in a preprocessor directive.
 * The following ...IS_* macros can be used to check which clock is in use.
 */
#if defined(CLOCK_MONOTONIC)
#define CUSTOM_CLOCK_MONOTONIC CLOCK_MONOTONIC
#define CUSTOM_CLOCK_MONOTONIC_IS_MONOTONIC
#elif defined(CLOCK_HIGHRES)
#define CUSTOM_CLOCK_MONOTONIC CLOCK_HIGHRES
#define CUSTOM_CLOCK_MONOTONIC_IS_HIGHRES
#else
#define CUSTOM_CLOCK_MONOTONIC CLOCK_REALTIME
#define CUSTOM_CLOCK_MONOTONIC_IS_REALTIME
#endif

#endif
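
To show why CUSTOM_CLOCK_MONOTONIC is useful, here is a small hypothetical
sketch of interval timing written against system/time.h. It assumes a
libreplace-style build (clock_gettime() is mapped to rep_clock_gettime() on
platforms that lack it); elapsed_ms() is invented for the example and is not
part of the library.

/* Hypothetical example: measure elapsed time with the best clock available. */
#include "replace.h"
#include "system/time.h"

static double elapsed_ms(const struct timespec *start)
{
	struct timespec now;

	/* CLOCK_MONOTONIC where the platform has it, otherwise the
	 * CLOCK_HIGHRES/CLOCK_REALTIME fallback selected above. */
	clock_gettime(CUSTOM_CLOCK_MONOTONIC, &now);
	return (now.tv_sec - start->tv_sec) * 1000.0 +
	       (now.tv_nsec - start->tv_nsec) / 1000000.0;
}
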
tdb-1.4.2/lib/replace/system/wait.h0000660000000000000000000000257212702766507017063 0ustar rootroot00000000000000
#ifndef _system_wait_h
#define _system_wait_h
/*
   Unix SMB/CIFS implementation.

   waitpid system include wrappers

   Copyright (C) Andrew Tridgell 2004

     ** NOTE! The following LGPL license applies to the replace
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#ifdef HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif

#include <signal.h>

#ifndef SIGCLD
#define SIGCLD SIGCHLD
#endif

#ifdef HAVE_SETJMP_H
#include <setjmp.h>
#endif

#ifdef HAVE_SYS_UCONTEXT_H
#include <sys/ucontext.h>
#endif

#if !defined(HAVE_SIG_ATOMIC_T_TYPE)
typedef int sig_atomic_t;
#endif

#if !defined(HAVE_WAITPID) && defined(HAVE_WAIT4)
int rep_waitpid(pid_t pid,int *status,int options);
#endif

#endif
tdb-1.4.2/lib/replace/system/wscript_configure0000660000000000000000000000156413444661620021423 0ustar rootroot00000000000000
#!/usr/bin/env python

# solaris variants of getXXent_r
conf.CHECK_C_PROTOTYPE('getpwent_r',
                       'struct passwd *getpwent_r(struct passwd *src, char *buf, int buflen)',
                       define='SOLARIS_GETPWENT_R', headers='pwd.h')
conf.CHECK_C_PROTOTYPE('getgrent_r',
                       'struct group *getgrent_r(struct group *src, char *buf, int buflen)',
                       define='SOLARIS_GETGRENT_R', headers='grp.h')

# the irix variants
conf.CHECK_C_PROTOTYPE('getpwent_r',
                       'struct passwd *getpwent_r(struct passwd *src, char *buf, size_t buflen)',
                       define='SOLARIS_GETPWENT_R', headers='pwd.h')
conf.CHECK_C_PROTOTYPE('getgrent_r',
                       'struct group *getgrent_r(struct group *src, char *buf, size_t buflen)',
                       define='SOLARIS_GETGRENT_R', headers='grp.h')
tdb-1.4.2/lib/replace/tests/getifaddrs.c0000660000000000000000000000504513526763114020042 0ustar rootroot00000000000000
/*
 * Unix SMB/CIFS implementation.
 *
 * libreplace getifaddrs test
 *
 * Copyright (C) Michael Adam 2008
 *
 * ** NOTE! The following LGPL license applies to the replace
 * ** library. This does NOT imply that all of Samba is released
 * ** under the LGPL
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 3 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/ #ifndef AUTOCONF_TEST #include "replace.h" #include "system/network.h" #include "replace-test.h" #endif #ifdef HAVE_INET_NTOP #define rep_inet_ntop inet_ntop #endif static const char *format_sockaddr(struct sockaddr *addr, char *addrstring, socklen_t addrlen) { const char *result = NULL; if (addr->sa_family == AF_INET) { result = rep_inet_ntop(AF_INET, &((struct sockaddr_in *)addr)->sin_addr, addrstring, addrlen); #ifdef HAVE_STRUCT_SOCKADDR_IN6 } else if (addr->sa_family == AF_INET6) { result = rep_inet_ntop(AF_INET6, &((struct sockaddr_in6 *)addr)->sin6_addr, addrstring, addrlen); #endif } return result; } int getifaddrs_test(void) { struct ifaddrs *ifs = NULL; struct ifaddrs *ifs_head = NULL; int ret; ret = getifaddrs(&ifs); ifs_head = ifs; if (ret != 0) { fprintf(stderr, "getifaddrs() failed: %s\n", strerror(errno)); return 1; } while (ifs) { printf("%-10s ", ifs->ifa_name); if (ifs->ifa_addr != NULL) { char addrstring[INET6_ADDRSTRLEN]; const char *result; result = format_sockaddr(ifs->ifa_addr, addrstring, sizeof(addrstring)); if (result != NULL) { printf("IP=%s ", addrstring); } if (ifs->ifa_netmask != NULL) { result = format_sockaddr(ifs->ifa_netmask, addrstring, sizeof(addrstring)); if (result != NULL) { printf("NETMASK=%s", addrstring); } } else { printf("AF=%d ", ifs->ifa_addr->sa_family); } } else { printf(""); } printf("\n"); ifs = ifs->ifa_next; } freeifaddrs(ifs_head); return 0; } tdb-1.4.2/lib/replace/tests/incoherent_mmap.c0000660000000000000000000000345613526763114021102 0ustar rootroot00000000000000/* In OpenBSD, if you write to a file, another process doesn't see it * in its mmap. Returns with exit status 0 if that is the case, 1 if * it's coherent, and other if there's a problem. */ #include #include #include #include #include #include #include #include #include #define DATA "coherent.mmap" int main(int argc, char *argv[]) { int tochild[2], toparent[2]; int fd; volatile unsigned char *map; unsigned char *page; const char *fname = argv[1]; char c = 0; if (pipe(tochild) != 0 || pipe(toparent) != 0) err(2, "Creating pipe"); if (!fname) fname = DATA; fd = open(fname, O_RDWR|O_CREAT|O_TRUNC, 0600); if (fd < 0) err(2, "opening %s", fname); unlink(fname); switch (fork()) { case -1: err(2, "Fork"); case 0: close(tochild[1]); close(toparent[0]); /* Wait for parent to create file. */ if (read(tochild[0], &c, 1) != 1) err(2, "reading from parent"); /* Alter first byte. */ pwrite(fd, &c, 1, 0); if (write(toparent[1], &c, 1) != 1) err(2, "writing to parent"); exit(0); default: close(tochild[0]); close(toparent[1]); /* Create a file and mmap it. */ page = malloc(getpagesize()); memset(page, 0x42, getpagesize()); if (write(fd, page, getpagesize()) != getpagesize()) err(2, "writing first page"); map = mmap(NULL, getpagesize(), PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); if (map == MAP_FAILED) err(2, "mapping file"); if (*map != 0x42) errx(2, "first byte isn't 0x42!"); /* Tell child to alter file. */ if (write(tochild[1], &c, 1) != 1) err(2, "writing to child"); if (read(toparent[0], &c, 1) != 1) err(2, "reading from child"); if (*map) errx(0, "mmap incoherent: first byte isn't 0."); exit(1); } } tdb-1.4.2/lib/replace/tests/main.c0000660000000000000000000000205013526763114016643 0ustar rootroot00000000000000/* Unix SMB/CIFS implementation. libreplace tests Copyright (C) Jelmer Vernooij 2006 ** NOTE! The following LGPL license applies to the talloc ** library. 
This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "replace.h" #include "replace-testsuite.h" int main(void) { bool ret = torture_local_replace(NULL); if (ret) return 0; return -1; } tdb-1.4.2/lib/replace/tests/os2_delete.c0000660000000000000000000000514213526763114017751 0ustar rootroot00000000000000/* test readdir/unlink pattern that OS/2 uses tridge@samba.org July 2005 */ #include #include #include #include #include #include #include #include #include #include "replace-test.h" #define NUM_FILES 700 #define READDIR_SIZE 100 #define DELETE_SIZE 4 #define TESTDIR "test.dir" static int test_readdir_os2_delete_ret; #define FAILED(d) (printf("failure: readdir [\nFailed for %s - %d = %s\n]\n", d, errno, strerror(errno)), test_readdir_os2_delete_ret = 1) #ifndef MIN #define MIN(a,b) ((a)<(b)?(a):(b)) #endif #ifdef _WIN32 #define mkdir(d,m) _mkdir(d) #endif static void cleanup(void) { /* I'm a lazy bastard */ if (system("rm -rf " TESTDIR)) { FAILED("system"); } mkdir(TESTDIR, 0700) == 0 || FAILED("mkdir"); } static void create_files(void) { int i; for (i=0;id_name); } if (i == 0) { return 0; } /* delete the first few */ for (j=0; jd_name, ".") == 0 || FAILED("match ."); de = readdir(d); strcmp(de->d_name, "..") == 0 || FAILED("match .."); while (1) { int n = os2_delete(d); if (n == 0) break; total_deleted += n; } closedir(d); fprintf(stderr, "Deleted %d files of %d\n", total_deleted, NUM_FILES); rmdir(TESTDIR) == 0 || FAILED("rmdir"); if (system("rm -rf " TESTDIR) == -1) { FAILED("system"); } return test_readdir_os2_delete_ret; } tdb-1.4.2/lib/replace/tests/shared_mmap.c0000660000000000000000000000231513526763114020203 0ustar rootroot00000000000000/* this tests whether we can use a shared writeable mmap on a file - as needed for the mmap variant of FAST_SHARE_MODES */ #if defined(HAVE_UNISTD_H) #include #endif #ifdef HAVE_STDLIB_H #include #endif #include #include #include #include #define DATA "conftest.mmap" #ifndef MAP_FILE #define MAP_FILE 0 #endif int main(void) { int *buf; int i; int fd = open(DATA,O_RDWR|O_CREAT|O_TRUNC,0666); int count=7; if (fd == -1) exit(1); for (i=0;i<10000;i++) { write(fd,&i,sizeof(i)); } close(fd); if (fork() == 0) { fd = open(DATA,O_RDWR); if (fd == -1) exit(1); buf = (int *)mmap(NULL, 10000*sizeof(int), (PROT_READ | PROT_WRITE), MAP_FILE | MAP_SHARED, fd, 0); while (count-- && buf[9124] != 55732) sleep(1); if (count <= 0) exit(1); buf[1763] = 7268; exit(0); } fd = open(DATA,O_RDWR); if (fd == -1) exit(1); buf = (int *)mmap(NULL, 10000*sizeof(int), (PROT_READ | PROT_WRITE), MAP_FILE | MAP_SHARED, fd, 0); if (buf == (int *)-1) exit(1); buf[9124] = 55732; while (count-- && buf[1763] != 7268) sleep(1); unlink(DATA); if (count > 0) exit(0); exit(1); } tdb-1.4.2/lib/replace/tests/shared_mremap.c0000660000000000000000000000141713526763114020534 0ustar rootroot00000000000000/* this tests whether we can use mremap */ #if defined(HAVE_UNISTD_H) #include 
#endif #ifdef HAVE_STDLIB_H #include #endif #include #include #include #include #define DATA "conftest.mmap" #ifndef MAP_FILE #define MAP_FILE 0 #endif #ifndef MAP_FAILED #define MAP_FAILED (int *)-1 #endif int main(void) { int *buf; int fd; int err = 1; fd = open(DATA, O_RDWR|O_CREAT|O_TRUNC, 0666); if (fd == -1) { exit(1); } buf = (int *)mmap(NULL, 0x1000, PROT_READ | PROT_WRITE, MAP_FILE | MAP_SHARED, fd, 0); if (buf == MAP_FAILED) { goto done; } buf = mremap(buf, 0x1000, 0x2000, MREMAP_MAYMOVE); if (buf == MAP_FAILED) { goto done; } err = 0; done: close(fd); unlink(DATA); exit(err); } tdb-1.4.2/lib/replace/tests/snprintf.c0000660000000000000000000000135513526763114017571 0ustar rootroot00000000000000void foo(const char *format, ...) { va_list ap; int len; char buf[20]; long long l = 1234567890; l *= 100; va_start(ap, format); len = vsnprintf(buf, 0, format, ap); va_end(ap); if (len != 5) exit(1); va_start(ap, format); len = vsnprintf(0, 0, format, ap); va_end(ap); if (len != 5) exit(2); if (snprintf(buf, 3, "hello") != 5 || strcmp(buf, "he") != 0) exit(3); if (snprintf(buf, 20, "%lld", l) != 12 || strcmp(buf, "123456789000") != 0) exit(4); if (snprintf(buf, 20, "%zu", 123456789) != 9 || strcmp(buf, "123456789") != 0) exit(5); if (snprintf(buf, 20, "%2\$d %1\$d", 3, 4) != 3 || strcmp(buf, "4 3") != 0) exit(6); if (snprintf(buf, 20, "%s", 0) < 3) exit(7); printf("1"); exit(0); } int main(void) { foo("hello"); } tdb-1.4.2/lib/replace/tests/strptime.c0000660000000000000000000000660613526763114017601 0ustar rootroot00000000000000 #ifdef LIBREPLACE_CONFIGURE_TEST_STRPTIME #include #include #include #define true 1 #define false 0 #ifndef __STRING #define __STRING(x) #x #endif /* make printf a no-op */ #define printf if(0) printf #else /* LIBREPLACE_CONFIGURE_TEST_STRPTIME */ #include "replace.h" #include "system/time.h" #include "replace-test.h" #endif /* LIBREPLACE_CONFIGURE_TEST_STRPTIME */ int libreplace_test_strptime(void) { const char *s = "20070414101546Z"; char *ret; struct tm t, t2; memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2)); printf("test: strptime\n"); ret = strptime(s, "%Y%m%d%H%M%S", &t); if ( ret == NULL ) { printf("failure: strptime [\n" "returned NULL\n" "]\n"); return false; } if ( *ret != 'Z' ) { printf("failure: strptime [\n" "ret doesn't point to 'Z'\n" "]\n"); return false; } ret = strptime(s, "%Y%m%d%H%M%SZ", &t2); if ( ret == NULL ) { printf("failure: strptime [\n" "returned NULL with Z\n" "]\n"); return false; } if ( *ret != '\0' ) { printf("failure: strptime [\n" "ret doesn't point to '\\0'\n" "]\n"); return false; } #define CMP_TM_ELEMENT(t1,t2,elem) \ if (t1.elem != t2.elem) { \ printf("failure: strptime [\n" \ "result differs if the format string has a 'Z' at the end\n" \ "element: %s %d != %d\n" \ "]\n", \ __STRING(elen), t1.elem, t2.elem); \ return false; \ } CMP_TM_ELEMENT(t,t2,tm_sec); CMP_TM_ELEMENT(t,t2,tm_min); CMP_TM_ELEMENT(t,t2,tm_hour); CMP_TM_ELEMENT(t,t2,tm_mday); CMP_TM_ELEMENT(t,t2,tm_mon); CMP_TM_ELEMENT(t,t2,tm_year); CMP_TM_ELEMENT(t,t2,tm_wday); CMP_TM_ELEMENT(t,t2,tm_yday); CMP_TM_ELEMENT(t,t2,tm_isdst); if (t.tm_sec != 46) { printf("failure: strptime [\n" "tm_sec: expected: 46, got: %d\n" "]\n", t.tm_sec); return false; } if (t.tm_min != 15) { printf("failure: strptime [\n" "tm_min: expected: 15, got: %d\n" "]\n", t.tm_min); return false; } if (t.tm_hour != 10) { printf("failure: strptime [\n" "tm_hour: expected: 10, got: %d\n" "]\n", t.tm_hour); return false; } if (t.tm_mday != 14) { printf("failure: strptime [\n" "tm_mday: expected: 
14, got: %d\n" "]\n", t.tm_mday); return false; } if (t.tm_mon != 3) { printf("failure: strptime [\n" "tm_mon: expected: 3, got: %d\n" "]\n", t.tm_mon); return false; } if (t.tm_year != 107) { printf("failure: strptime [\n" "tm_year: expected: 107, got: %d\n" "]\n", t.tm_year); return false; } if (t.tm_wday != 6) { /* saturday */ printf("failure: strptime [\n" "tm_wday: expected: 6, got: %d\n" "]\n", t.tm_wday); return false; } if (t.tm_yday != 103) { printf("failure: strptime [\n" "tm_yday: expected: 103, got: %d\n" "]\n", t.tm_yday); return false; } /* we don't test this as it depends on the host configuration if (t.tm_isdst != 0) { printf("failure: strptime [\n" "tm_isdst: expected: 0, got: %d\n" "]\n", t.tm_isdst); return false; }*/ printf("success: strptime\n"); return true; } #ifdef LIBREPLACE_CONFIGURE_TEST_STRPTIME int main (void) { int ret; ret = libreplace_test_strptime(); if (ret == false) return 1; return 0; } #endif tdb-1.4.2/lib/replace/tests/testsuite.c0000660000000000000000000007213513526763114017763 0ustar rootroot00000000000000/* Unix SMB/CIFS implementation. libreplace tests Copyright (C) Jelmer Vernooij 2006 ** NOTE! The following LGPL license applies to the talloc ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see . */ #include "replace.h" #include "replace-test.h" #include "replace-testsuite.h" /* we include all the system/ include files here so that libreplace tests them in the build farm */ #include "system/capability.h" #include "system/dir.h" #include "system/filesys.h" #include "system/glob.h" #include "system/iconv.h" #include "system/locale.h" #include "system/network.h" #include "system/passwd.h" #include "system/readline.h" #include "system/select.h" #include "system/shmem.h" #include "system/syslog.h" #include "system/terminal.h" #include "system/time.h" #include "system/wait.h" #include "system/aio.h" #define TESTFILE "testfile.dat" /* test ftruncate() function */ static int test_ftruncate(void) { struct stat st; int fd; const int size = 1234; printf("test: ftruncate\n"); unlink(TESTFILE); fd = open(TESTFILE, O_RDWR|O_CREAT, 0600); if (fd == -1) { printf("failure: ftruncate [\n" "creating '%s' failed - %s\n]\n", TESTFILE, strerror(errno)); return false; } if (ftruncate(fd, size) != 0) { printf("failure: ftruncate [\n%s\n]\n", strerror(errno)); close(fd); return false; } if (fstat(fd, &st) != 0) { printf("failure: ftruncate [\nfstat failed - %s\n]\n", strerror(errno)); close(fd); return false; } if (st.st_size != size) { printf("failure: ftruncate [\ngave wrong size %d - expected %d\n]\n", (int)st.st_size, size); close(fd); return false; } unlink(TESTFILE); printf("success: ftruncate\n"); close(fd); return true; } /* test strlcpy() function. 
see http://www.gratisoft.us/todd/papers/strlcpy.html */ static int test_strlcpy(void) { char buf[4]; const struct { const char *src; size_t result; } tests[] = { { "abc", 3 }, { "abcdef", 6 }, { "abcd", 4 }, { "", 0 }, { NULL, 0 } }; int i; printf("test: strlcpy\n"); for (i=0;tests[i].src;i++) { if (strlcpy(buf, tests[i].src, sizeof(buf)) != tests[i].result) { printf("failure: strlcpy [\ntest %d failed\n]\n", i); return false; } } printf("success: strlcpy\n"); return true; } static int test_strlcat(void) { char tmp[10]; printf("test: strlcat\n"); strlcpy(tmp, "", sizeof(tmp)); if (strlcat(tmp, "bla", 3) != 3) { printf("failure: strlcat [\ninvalid return code\n]\n"); return false; } if (strcmp(tmp, "bl") != 0) { printf("failure: strlcat [\nexpected \"bl\", got \"%s\"\n]\n", tmp); return false; } strlcpy(tmp, "da", sizeof(tmp)); if (strlcat(tmp, "me", 4) != 4) { printf("failure: strlcat [\nexpected \"dam\", got \"%s\"\n]\n", tmp); return false; } printf("success: strlcat\n"); return true; } static int test_mktime(void) { /* FIXME */ return true; } static int test_initgroups(void) { /* FIXME */ return true; } static int test_memmove(void) { /* FIXME */ return true; } static int test_strdup(void) { char *x; printf("test: strdup\n"); x = strdup("bla"); if (strcmp("bla", x) != 0) { printf("failure: strdup [\nfailed: expected \"bla\", got \"%s\"\n]\n", x); return false; } free(x); printf("success: strdup\n"); return true; } static int test_setlinebuf(void) { printf("test: setlinebuf\n"); setlinebuf(stdout); printf("success: setlinebuf\n"); return true; } static int test_vsyslog(void) { /* FIXME */ return true; } static int test_timegm(void) { /* FIXME */ return true; } static int test_setenv(void) { #define TEST_SETENV(key, value, overwrite, result) do { \ int _ret; \ char *_v; \ _ret = setenv(key, value, overwrite); \ if (_ret != 0) { \ printf("failure: setenv [\n" \ "setenv(%s, %s, %d) failed\n" \ "]\n", \ key, value, overwrite); \ return false; \ } \ _v=getenv(key); \ if (!_v) { \ printf("failure: setenv [\n" \ "getenv(%s) returned NULL\n" \ "]\n", \ key); \ return false; \ } \ if (strcmp(result, _v) != 0) { \ printf("failure: setenv [\n" \ "getenv(%s): '%s' != '%s'\n" \ "]\n", \ key, result, _v); \ return false; \ } \ } while(0) #define TEST_UNSETENV(key) do { \ char *_v; \ unsetenv(key); \ _v=getenv(key); \ if (_v) { \ printf("failure: setenv [\n" \ "getenv(%s): NULL != '%s'\n" \ "]\n", \ SETENVTEST_KEY, _v); \ return false; \ } \ } while (0) #define SETENVTEST_KEY "SETENVTESTKEY" #define SETENVTEST_VAL "SETENVTESTVAL" printf("test: setenv\n"); TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"1", 0, SETENVTEST_VAL"1"); TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"2", 0, SETENVTEST_VAL"1"); TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"3", 1, SETENVTEST_VAL"3"); TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"4", 1, SETENVTEST_VAL"4"); TEST_UNSETENV(SETENVTEST_KEY); TEST_UNSETENV(SETENVTEST_KEY); TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"5", 0, SETENVTEST_VAL"5"); TEST_UNSETENV(SETENVTEST_KEY); TEST_UNSETENV(SETENVTEST_KEY); printf("success: setenv\n"); return true; } static int test_strndup(void) { char *x; printf("test: strndup\n"); x = strndup("bla", 0); if (strcmp(x, "") != 0) { printf("failure: strndup [\ninvalid\n]\n"); return false; } free(x); x = strndup("bla", 2); if (strcmp(x, "bl") != 0) { printf("failure: strndup [\ninvalid\n]\n"); return false; } free(x); x = strndup("bla", 10); if (strcmp(x, "bla") != 0) { printf("failure: strndup [\ninvalid\n]\n"); free(x); return false; } free(x); 
printf("success: strndup\n"); return true; } static int test_strnlen(void) { printf("test: strnlen\n"); if (strnlen("bla", 2) != 2) { printf("failure: strnlen [\nunexpected length\n]\n"); return false; } if (strnlen("some text\n", 0) != 0) { printf("failure: strnlen [\nunexpected length\n]\n"); return false; } if (strnlen("some text", 20) != 9) { printf("failure: strnlen [\nunexpected length\n]\n"); return false; } printf("success: strnlen\n"); return true; } static int test_waitpid(void) { /* FIXME */ return true; } static int test_seteuid(void) { /* FIXME */ return true; } static int test_setegid(void) { /* FIXME */ return true; } static int test_asprintf(void) { char *x; printf("test: asprintf\n"); if (asprintf(&x, "%d", 9) != 1) { printf("failure: asprintf [\ngenerate asprintf\n]\n"); return false; } if (strcmp(x, "9") != 0) { printf("failure: asprintf [\ngenerate asprintf\n]\n"); return false; } if (asprintf(&x, "dat%s", "a") != 4) { printf("failure: asprintf [\ngenerate asprintf\n]\n"); return false; } if (strcmp(x, "data") != 0) { printf("failure: asprintf [\ngenerate asprintf\n]\n"); return false; } printf("success: asprintf\n"); return true; } static int test_snprintf(void) { char tmp[10]; printf("test: snprintf\n"); if (snprintf(tmp, 3, "foo%d", 9) != 4) { printf("failure: snprintf [\nsnprintf return code failed\n]\n"); return false; } if (strcmp(tmp, "fo") != 0) { printf("failure: snprintf [\nsnprintf failed\n]\n"); return false; } printf("success: snprintf\n"); return true; } static int test_vasprintf(void) { /* FIXME */ return true; } static int test_vsnprintf(void) { /* FIXME */ return true; } static int test_opendir(void) { /* FIXME */ return true; } static int test_readdir(void) { printf("test: readdir\n"); if (test_readdir_os2_delete() != 0) { return false; } printf("success: readdir\n"); return true; } static int test_telldir(void) { /* FIXME */ return true; } static int test_seekdir(void) { /* FIXME */ return true; } static int test_dlopen(void) { /* FIXME: test dlopen, dlsym, dlclose, dlerror */ return true; } static int test_chroot(void) { /* FIXME: chroot() */ return true; } static int test_bzero(void) { /* FIXME: bzero */ return true; } static int test_strerror(void) { /* FIXME */ return true; } static int test_errno(void) { printf("test: errno\n"); errno = 3; if (errno != 3) { printf("failure: errno [\nerrno failed\n]\n"); return false; } printf("success: errno\n"); return true; } static int test_mkdtemp(void) { /* FIXME */ return true; } static int test_mkstemp(void) { /* FIXME */ return true; } static int test_pread(void) { /* FIXME */ return true; } static int test_pwrite(void) { /* FIXME */ return true; } static int test_inet_ntoa(void) { /* FIXME */ return true; } #define TEST_STRTO_X(type,fmt,func,str,base,res,diff,rrnoo) do {\ type _v; \ char _s[64]; \ char *_p = NULL;\ char *_ep = NULL; \ strlcpy(_s, str, sizeof(_s));\ if (diff >= 0) { \ _ep = &_s[diff]; \ } \ errno = 0; \ _v = func(_s, &_p, base); \ if (errno != rrnoo) { \ printf("failure: %s [\n" \ "\t%s\n" \ "\t%s(\"%s\",%d,%d): " fmt " (=/!)= " fmt "\n" \ "\terrno: %d != %d\n" \ "]\n", \ __STRING(func), __location__, __STRING(func), \ str, diff, base, res, _v, rrnoo, errno); \ return false; \ } else if (_v != res) { \ printf("failure: %s [\n" \ "\t%s\n" \ "\t%s(\"%s\",%d,%d): " fmt " != " fmt "\n" \ "]\n", \ __STRING(func), __location__, __STRING(func), \ str, diff, base, res, _v); \ return false; \ } else if (_p != _ep) { \ printf("failure: %s [\n" \ "\t%s\n" \ "\t%s(\"%s\",%d,%d): " fmt " (=/!)= " fmt 
"\n" \ "\tptr: %p - %p = %d != %d\n" \ "]\n", \ __STRING(func), __location__, __STRING(func), \ str, diff, base, res, _v, _ep, _p, (int)(diff - (_ep - _p)), diff); \ return false; \ } \ } while (0) static int test_strtoll(void) { printf("test: strtoll\n"); #define TEST_STRTOLL(str,base,res,diff,errnoo) TEST_STRTO_X(long long int, "%lld", strtoll,str,base,res,diff,errnoo) TEST_STRTOLL("15", 10, 15LL, 2, 0); TEST_STRTOLL(" 15", 10, 15LL, 4, 0); TEST_STRTOLL("15", 0, 15LL, 2, 0); TEST_STRTOLL(" 15 ", 0, 15LL, 3, 0); TEST_STRTOLL("+15", 10, 15LL, 3, 0); TEST_STRTOLL(" +15", 10, 15LL, 5, 0); TEST_STRTOLL("+15", 0, 15LL, 3, 0); TEST_STRTOLL(" +15 ", 0, 15LL, 4, 0); TEST_STRTOLL("-15", 10, -15LL, 3, 0); TEST_STRTOLL(" -15", 10, -15LL, 5, 0); TEST_STRTOLL("-15", 0, -15LL, 3, 0); TEST_STRTOLL(" -15 ", 0, -15LL, 4, 0); TEST_STRTOLL("015", 10, 15LL, 3, 0); TEST_STRTOLL(" 015", 10, 15LL, 5, 0); TEST_STRTOLL("015", 0, 13LL, 3, 0); TEST_STRTOLL(" 015", 0, 13LL, 5, 0); TEST_STRTOLL("0x15", 10, 0LL, 1, 0); TEST_STRTOLL(" 0x15", 10, 0LL, 3, 0); TEST_STRTOLL("0x15", 0, 21LL, 4, 0); TEST_STRTOLL(" 0x15", 0, 21LL, 6, 0); TEST_STRTOLL("10", 16, 16LL, 2, 0); TEST_STRTOLL(" 10 ", 16, 16LL, 4, 0); TEST_STRTOLL("0x10", 16, 16LL, 4, 0); TEST_STRTOLL("0x10", 0, 16LL, 4, 0); TEST_STRTOLL(" 0x10 ", 0, 16LL, 5, 0); TEST_STRTOLL("+10", 16, 16LL, 3, 0); TEST_STRTOLL(" +10 ", 16, 16LL, 5, 0); TEST_STRTOLL("+0x10", 16, 16LL, 5, 0); TEST_STRTOLL("+0x10", 0, 16LL, 5, 0); TEST_STRTOLL(" +0x10 ", 0, 16LL, 6, 0); TEST_STRTOLL("-10", 16, -16LL, 3, 0); TEST_STRTOLL(" -10 ", 16, -16LL, 5, 0); TEST_STRTOLL("-0x10", 16, -16LL, 5, 0); TEST_STRTOLL("-0x10", 0, -16LL, 5, 0); TEST_STRTOLL(" -0x10 ", 0, -16LL, 6, 0); TEST_STRTOLL("010", 16, 16LL, 3, 0); TEST_STRTOLL(" 010 ", 16, 16LL, 5, 0); TEST_STRTOLL("-010", 16, -16LL, 4, 0); TEST_STRTOLL("11", 8, 9LL, 2, 0); TEST_STRTOLL("011", 8, 9LL, 3, 0); TEST_STRTOLL("011", 0, 9LL, 3, 0); TEST_STRTOLL("-11", 8, -9LL, 3, 0); TEST_STRTOLL("-011", 8, -9LL, 4, 0); TEST_STRTOLL("-011", 0, -9LL, 4, 0); TEST_STRTOLL("011", 8, 9LL, 3, 0); TEST_STRTOLL("011", 0, 9LL, 3, 0); TEST_STRTOLL("-11", 8, -9LL, 3, 0); TEST_STRTOLL("-011", 8, -9LL, 4, 0); TEST_STRTOLL("-011", 0, -9LL, 4, 0); TEST_STRTOLL("Text", 0, 0LL, 0, 0); TEST_STRTOLL("9223372036854775807", 10, 9223372036854775807LL, 19, 0); TEST_STRTOLL("9223372036854775807", 0, 9223372036854775807LL, 19, 0); TEST_STRTOLL("9223372036854775808", 0, 9223372036854775807LL, 19, ERANGE); TEST_STRTOLL("9223372036854775808", 10, 9223372036854775807LL, 19, ERANGE); TEST_STRTOLL("0x7FFFFFFFFFFFFFFF", 0, 9223372036854775807LL, 18, 0); TEST_STRTOLL("0x7FFFFFFFFFFFFFFF", 16, 9223372036854775807LL, 18, 0); TEST_STRTOLL("7FFFFFFFFFFFFFFF", 16, 9223372036854775807LL, 16, 0); TEST_STRTOLL("0x8000000000000000", 0, 9223372036854775807LL, 18, ERANGE); TEST_STRTOLL("0x8000000000000000", 16, 9223372036854775807LL, 18, ERANGE); TEST_STRTOLL("80000000000000000", 16, 9223372036854775807LL, 17, ERANGE); TEST_STRTOLL("0777777777777777777777", 0, 9223372036854775807LL, 22, 0); TEST_STRTOLL("0777777777777777777777", 8, 9223372036854775807LL, 22, 0); TEST_STRTOLL("777777777777777777777", 8, 9223372036854775807LL, 21, 0); TEST_STRTOLL("01000000000000000000000", 0, 9223372036854775807LL, 23, ERANGE); TEST_STRTOLL("01000000000000000000000", 8, 9223372036854775807LL, 23, ERANGE); TEST_STRTOLL("1000000000000000000000", 8, 9223372036854775807LL, 22, ERANGE); TEST_STRTOLL("-9223372036854775808", 10, -9223372036854775807LL -1, 20, 0); TEST_STRTOLL("-9223372036854775808", 0, 
-9223372036854775807LL -1, 20, 0); TEST_STRTOLL("-9223372036854775809", 0, -9223372036854775807LL -1, 20, ERANGE); TEST_STRTOLL("-9223372036854775809", 10, -9223372036854775807LL -1, 20, ERANGE); TEST_STRTOLL("-0x8000000000000000", 0, -9223372036854775807LL -1, 19, 0); TEST_STRTOLL("-0x8000000000000000", 16, -9223372036854775807LL -1, 19, 0); TEST_STRTOLL("-8000000000000000", 16, -9223372036854775807LL -1, 17, 0); TEST_STRTOLL("-0x8000000000000001", 0, -9223372036854775807LL -1, 19, ERANGE); TEST_STRTOLL("-0x8000000000000001", 16, -9223372036854775807LL -1, 19, ERANGE); TEST_STRTOLL("-80000000000000001", 16, -9223372036854775807LL -1, 18, ERANGE); TEST_STRTOLL("-01000000000000000000000",0, -9223372036854775807LL -1, 24, 0); TEST_STRTOLL("-01000000000000000000000",8, -9223372036854775807LL -1, 24, 0); TEST_STRTOLL("-1000000000000000000000", 8, -9223372036854775807LL -1, 23, 0); TEST_STRTOLL("-01000000000000000000001",0, -9223372036854775807LL -1, 24, ERANGE); TEST_STRTOLL("-01000000000000000000001",8, -9223372036854775807LL -1, 24, ERANGE); TEST_STRTOLL("-1000000000000000000001", 8, -9223372036854775807LL -1, 23, ERANGE); printf("success: strtoll\n"); return true; } static int test_strtoull(void) { printf("test: strtoull\n"); #define TEST_STRTOULL(str,base,res,diff,errnoo) TEST_STRTO_X(long long unsigned int,"%llu",strtoull,str,base,res,diff,errnoo) TEST_STRTOULL("15", 10, 15LLU, 2, 0); TEST_STRTOULL(" 15", 10, 15LLU, 4, 0); TEST_STRTOULL("15", 0, 15LLU, 2, 0); TEST_STRTOULL(" 15 ", 0, 15LLU, 3, 0); TEST_STRTOULL("+15", 10, 15LLU, 3, 0); TEST_STRTOULL(" +15", 10, 15LLU, 5, 0); TEST_STRTOULL("+15", 0, 15LLU, 3, 0); TEST_STRTOULL(" +15 ", 0, 15LLU, 4, 0); TEST_STRTOULL("-15", 10, 18446744073709551601LLU, 3, 0); TEST_STRTOULL(" -15", 10, 18446744073709551601LLU, 5, 0); TEST_STRTOULL("-15", 0, 18446744073709551601LLU, 3, 0); TEST_STRTOULL(" -15 ", 0, 18446744073709551601LLU, 4, 0); TEST_STRTOULL("015", 10, 15LLU, 3, 0); TEST_STRTOULL(" 015", 10, 15LLU, 5, 0); TEST_STRTOULL("015", 0, 13LLU, 3, 0); TEST_STRTOULL(" 015", 0, 13LLU, 5, 0); TEST_STRTOULL("0x15", 10, 0LLU, 1, 0); TEST_STRTOULL(" 0x15", 10, 0LLU, 3, 0); TEST_STRTOULL("0x15", 0, 21LLU, 4, 0); TEST_STRTOULL(" 0x15", 0, 21LLU, 6, 0); TEST_STRTOULL("10", 16, 16LLU, 2, 0); TEST_STRTOULL(" 10 ", 16, 16LLU, 4, 0); TEST_STRTOULL("0x10", 16, 16LLU, 4, 0); TEST_STRTOULL("0x10", 0, 16LLU, 4, 0); TEST_STRTOULL(" 0x10 ", 0, 16LLU, 5, 0); TEST_STRTOULL("+10", 16, 16LLU, 3, 0); TEST_STRTOULL(" +10 ", 16, 16LLU, 5, 0); TEST_STRTOULL("+0x10", 16, 16LLU, 5, 0); TEST_STRTOULL("+0x10", 0, 16LLU, 5, 0); TEST_STRTOULL(" +0x10 ", 0, 16LLU, 6, 0); TEST_STRTOULL("-10", 16, -16LLU, 3, 0); TEST_STRTOULL(" -10 ", 16, -16LLU, 5, 0); TEST_STRTOULL("-0x10", 16, -16LLU, 5, 0); TEST_STRTOULL("-0x10", 0, -16LLU, 5, 0); TEST_STRTOULL(" -0x10 ", 0, -16LLU, 6, 0); TEST_STRTOULL("010", 16, 16LLU, 3, 0); TEST_STRTOULL(" 010 ", 16, 16LLU, 5, 0); TEST_STRTOULL("-010", 16, -16LLU, 4, 0); TEST_STRTOULL("11", 8, 9LLU, 2, 0); TEST_STRTOULL("011", 8, 9LLU, 3, 0); TEST_STRTOULL("011", 0, 9LLU, 3, 0); TEST_STRTOULL("-11", 8, -9LLU, 3, 0); TEST_STRTOULL("-011", 8, -9LLU, 4, 0); TEST_STRTOULL("-011", 0, -9LLU, 4, 0); TEST_STRTOULL("011", 8, 9LLU, 3, 0); TEST_STRTOULL("011", 0, 9LLU, 3, 0); TEST_STRTOULL("-11", 8, -9LLU, 3, 0); TEST_STRTOULL("-011", 8, -9LLU, 4, 0); TEST_STRTOULL("-011", 0, -9LLU, 4, 0); TEST_STRTOULL("Text", 0, 0LLU, 0, 0); TEST_STRTOULL("9223372036854775807", 10, 9223372036854775807LLU, 19, 0); TEST_STRTOULL("9223372036854775807", 0, 9223372036854775807LLU, 19, 0); 
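	/*
	 * Editor's note: in TEST_STRTOULL(str, base, res, diff, errno) the
	 * "diff" column is the expected end-pointer offset, i.e. how many
	 * leading characters the conversion must consume -- 19 in the two
	 * cases above because "9223372036854775807" is 19 digits long. The
	 * cases below step past LLONG_MAX, which is fine for the unsigned
	 * conversion and must not set ERANGE until ULLONG_MAX is exceeded.
	 */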
TEST_STRTOULL("9223372036854775808", 0, 9223372036854775808LLU, 19, 0); TEST_STRTOULL("9223372036854775808", 10, 9223372036854775808LLU, 19, 0); TEST_STRTOULL("0x7FFFFFFFFFFFFFFF", 0, 9223372036854775807LLU, 18, 0); TEST_STRTOULL("0x7FFFFFFFFFFFFFFF", 16, 9223372036854775807LLU, 18, 0); TEST_STRTOULL("7FFFFFFFFFFFFFFF", 16, 9223372036854775807LLU, 16, 0); TEST_STRTOULL("0x8000000000000000", 0, 9223372036854775808LLU, 18, 0); TEST_STRTOULL("0x8000000000000000", 16, 9223372036854775808LLU, 18, 0); TEST_STRTOULL("8000000000000000", 16, 9223372036854775808LLU, 16, 0); TEST_STRTOULL("0777777777777777777777", 0, 9223372036854775807LLU, 22, 0); TEST_STRTOULL("0777777777777777777777", 8, 9223372036854775807LLU, 22, 0); TEST_STRTOULL("777777777777777777777", 8, 9223372036854775807LLU, 21, 0); TEST_STRTOULL("01000000000000000000000",0, 9223372036854775808LLU, 23, 0); TEST_STRTOULL("01000000000000000000000",8, 9223372036854775808LLU, 23, 0); TEST_STRTOULL("1000000000000000000000", 8, 9223372036854775808LLU, 22, 0); TEST_STRTOULL("-9223372036854775808", 10, 9223372036854775808LLU, 20, 0); TEST_STRTOULL("-9223372036854775808", 0, 9223372036854775808LLU, 20, 0); TEST_STRTOULL("-9223372036854775809", 0, 9223372036854775807LLU, 20, 0); TEST_STRTOULL("-9223372036854775809", 10, 9223372036854775807LLU, 20, 0); TEST_STRTOULL("-0x8000000000000000", 0, 9223372036854775808LLU, 19, 0); TEST_STRTOULL("-0x8000000000000000", 16, 9223372036854775808LLU, 19, 0); TEST_STRTOULL("-8000000000000000", 16, 9223372036854775808LLU, 17, 0); TEST_STRTOULL("-0x8000000000000001", 0, 9223372036854775807LLU, 19, 0); TEST_STRTOULL("-0x8000000000000001", 16, 9223372036854775807LLU, 19, 0); TEST_STRTOULL("-8000000000000001", 16, 9223372036854775807LLU, 17, 0); TEST_STRTOULL("-01000000000000000000000",0, 9223372036854775808LLU, 24, 0); TEST_STRTOULL("-01000000000000000000000",8, 9223372036854775808LLU, 24, 0); TEST_STRTOULL("-1000000000000000000000",8, 9223372036854775808LLU, 23, 0); TEST_STRTOULL("-01000000000000000000001",0, 9223372036854775807LLU, 24, 0); TEST_STRTOULL("-01000000000000000000001",8, 9223372036854775807LLU, 24, 0); TEST_STRTOULL("-1000000000000000000001",8, 9223372036854775807LLU, 23, 0); TEST_STRTOULL("18446744073709551615", 0, 18446744073709551615LLU, 20, 0); TEST_STRTOULL("18446744073709551615", 10, 18446744073709551615LLU, 20, 0); TEST_STRTOULL("18446744073709551616", 0, 18446744073709551615LLU, 20, ERANGE); TEST_STRTOULL("18446744073709551616", 10, 18446744073709551615LLU, 20, ERANGE); TEST_STRTOULL("0xFFFFFFFFFFFFFFFF", 0, 18446744073709551615LLU, 18, 0); TEST_STRTOULL("0xFFFFFFFFFFFFFFFF", 16, 18446744073709551615LLU, 18, 0); TEST_STRTOULL("FFFFFFFFFFFFFFFF", 16, 18446744073709551615LLU, 16, 0); TEST_STRTOULL("0x10000000000000000", 0, 18446744073709551615LLU, 19, ERANGE); TEST_STRTOULL("0x10000000000000000", 16, 18446744073709551615LLU, 19, ERANGE); TEST_STRTOULL("10000000000000000", 16, 18446744073709551615LLU, 17, ERANGE); TEST_STRTOULL("01777777777777777777777",0, 18446744073709551615LLU, 23, 0); TEST_STRTOULL("01777777777777777777777",8, 18446744073709551615LLU, 23, 0); TEST_STRTOULL("1777777777777777777777", 8, 18446744073709551615LLU, 22, 0); TEST_STRTOULL("02000000000000000000000",0, 18446744073709551615LLU, 23, ERANGE); TEST_STRTOULL("02000000000000000000000",8, 18446744073709551615LLU, 23, ERANGE); TEST_STRTOULL("2000000000000000000000", 8, 18446744073709551615LLU, 22, ERANGE); TEST_STRTOULL("-18446744073709551615", 0, 1LLU, 21, 0); TEST_STRTOULL("-18446744073709551615", 10, 1LLU, 21, 0); 
TEST_STRTOULL("-18446744073709551616", 0, 18446744073709551615LLU, 21, ERANGE); TEST_STRTOULL("-18446744073709551616", 10, 18446744073709551615LLU, 21, ERANGE); TEST_STRTOULL("-0xFFFFFFFFFFFFFFFF", 0, 1LLU, 19, 0); TEST_STRTOULL("-0xFFFFFFFFFFFFFFFF", 16, 1LLU, 19, 0); TEST_STRTOULL("-FFFFFFFFFFFFFFFF", 16, 1LLU, 17, 0); TEST_STRTOULL("-0x10000000000000000", 0, 18446744073709551615LLU, 20, ERANGE); TEST_STRTOULL("-0x10000000000000000", 16, 18446744073709551615LLU, 20, ERANGE); TEST_STRTOULL("-10000000000000000", 16, 18446744073709551615LLU, 18, ERANGE); TEST_STRTOULL("-01777777777777777777777",0, 1LLU, 24, 0); TEST_STRTOULL("-01777777777777777777777",8, 1LLU, 24, 0); TEST_STRTOULL("-1777777777777777777777",8, 1LLU, 23, 0); TEST_STRTOULL("-02000000000000000000000",0, 18446744073709551615LLU, 24, ERANGE); TEST_STRTOULL("-02000000000000000000000",8, 18446744073709551615LLU, 24, ERANGE); TEST_STRTOULL("-2000000000000000000000",8, 18446744073709551615LLU, 23, ERANGE); printf("success: strtoull\n"); return true; } /* FIXME: Types: bool socklen_t uint{8,16,32,64}_t int{8,16,32,64}_t intptr_t Constants: PATH_NAME_MAX UINT{16,32,64}_MAX INT32_MAX */ static int test_va_copy(void) { /* FIXME */ return true; } static int test_FUNCTION(void) { printf("test: FUNCTION\n"); if (strcmp(__FUNCTION__, "test_FUNCTION") != 0) { printf("failure: FUNCTION [\nFUNCTION invalid\n]\n"); return false; } printf("success: FUNCTION\n"); return true; } static int test_MIN(void) { printf("test: MIN\n"); if (MIN(20, 1) != 1) { printf("failure: MIN [\nMIN invalid\n]\n"); return false; } if (MIN(1, 20) != 1) { printf("failure: MIN [\nMIN invalid\n]\n"); return false; } printf("success: MIN\n"); return true; } static int test_MAX(void) { printf("test: MAX\n"); if (MAX(20, 1) != 20) { printf("failure: MAX [\nMAX invalid\n]\n"); return false; } if (MAX(1, 20) != 20) { printf("failure: MAX [\nMAX invalid\n]\n"); return false; } printf("success: MAX\n"); return true; } static int test_socketpair(void) { int sock[2]; char buf[20]; printf("test: socketpair\n"); if (socketpair(AF_UNIX, SOCK_STREAM, 0, sock) == -1) { printf("failure: socketpair [\n" "socketpair() failed\n" "]\n"); return false; } if (write(sock[1], "automatisch", 12) == -1) { printf("failure: socketpair [\n" "write() failed: %s\n" "]\n", strerror(errno)); return false; } if (read(sock[0], buf, 12) == -1) { printf("failure: socketpair [\n" "read() failed: %s\n" "]\n", strerror(errno)); return false; } if (strcmp(buf, "automatisch") != 0) { printf("failure: socketpair [\n" "expected: automatisch, got: %s\n" "]\n", buf); return false; } printf("success: socketpair\n"); return true; } extern int libreplace_test_strptime(void); static int test_strptime(void) { return libreplace_test_strptime(); } extern int getifaddrs_test(void); static int test_getifaddrs(void) { printf("test: getifaddrs\n"); if (getifaddrs_test() != 0) { printf("failure: getifaddrs\n"); return false; } printf("success: getifaddrs\n"); return true; } static int test_utime(void) { struct utimbuf u; struct stat st1, st2, st3; int fd; printf("test: utime\n"); unlink(TESTFILE); fd = open(TESTFILE, O_RDWR|O_CREAT, 0600); if (fd == -1) { printf("failure: utime [\n" "creating '%s' failed - %s\n]\n", TESTFILE, strerror(errno)); return false; } if (fstat(fd, &st1) != 0) { printf("failure: utime [\n" "fstat (1) failed - %s\n]\n", strerror(errno)); close(fd); return false; } u.actime = st1.st_atime + 300; u.modtime = st1.st_mtime - 300; if (utime(TESTFILE, &u) != 0) { printf("failure: utime [\n" "utime(&u) failed - 
%s\n]\n", strerror(errno)); close(fd); return false; } if (fstat(fd, &st2) != 0) { printf("failure: utime [\n" "fstat (2) failed - %s\n]\n", strerror(errno)); close(fd); return false; } if (utime(TESTFILE, NULL) != 0) { printf("failure: utime [\n" "utime(NULL) failed - %s\n]\n", strerror(errno)); close(fd); return false; } if (fstat(fd, &st3) != 0) { printf("failure: utime [\n" "fstat (3) failed - %s\n]\n", strerror(errno)); close(fd); return false; } #define CMP_VAL(a,c,b) do { \ if (a c b) { \ printf("failure: utime [\n" \ "%s: %s(%d) %s %s(%d)\n]\n", \ __location__, \ #a, (int)a, #c, #b, (int)b); \ close(fd); \ return false; \ } \ } while(0) #define EQUAL_VAL(a,b) CMP_VAL(a,!=,b) #define GREATER_VAL(a,b) CMP_VAL(a,<=,b) #define LESSER_VAL(a,b) CMP_VAL(a,>=,b) EQUAL_VAL(st2.st_atime, st1.st_atime + 300); EQUAL_VAL(st2.st_mtime, st1.st_mtime - 300); LESSER_VAL(st3.st_atime, st2.st_atime); GREATER_VAL(st3.st_mtime, st2.st_mtime); #undef CMP_VAL #undef EQUAL_VAL #undef GREATER_VAL #undef LESSER_VAL unlink(TESTFILE); printf("success: utime\n"); close(fd); return true; } static int test_utimes(void) { struct timeval tv[2]; struct stat st1, st2; int fd; printf("test: utimes\n"); unlink(TESTFILE); fd = open(TESTFILE, O_RDWR|O_CREAT, 0600); if (fd == -1) { printf("failure: utimes [\n" "creating '%s' failed - %s\n]\n", TESTFILE, strerror(errno)); return false; } if (fstat(fd, &st1) != 0) { printf("failure: utimes [\n" "fstat (1) failed - %s\n]\n", strerror(errno)); close(fd); return false; } ZERO_STRUCT(tv); tv[0].tv_sec = st1.st_atime + 300; tv[1].tv_sec = st1.st_mtime - 300; if (utimes(TESTFILE, tv) != 0) { printf("failure: utimes [\n" "utimes(tv) failed - %s\n]\n", strerror(errno)); close(fd); return false; } if (fstat(fd, &st2) != 0) { printf("failure: utimes [\n" "fstat (2) failed - %s\n]\n", strerror(errno)); close(fd); return false; } #define EQUAL_VAL(a,b) do { \ if (a != b) { \ printf("failure: utimes [\n" \ "%s: %s(%d) != %s(%d)\n]\n", \ __location__, \ #a, (int)a, #b, (int)b); \ close(fd); \ return false; \ } \ } while(0) EQUAL_VAL(st2.st_atime, st1.st_atime + 300); EQUAL_VAL(st2.st_mtime, st1.st_mtime - 300); #undef EQUAL_VAL unlink(TESTFILE); printf("success: utimes\n"); close(fd); return true; } static int test_memmem(void) { char *s; printf("test: memmem\n"); s = (char *)memmem("foo", 3, "fo", 2); if (strcmp(s, "foo") != 0) { printf(__location__ ": Failed memmem\n"); return false; } s = (char *)memmem("foo", 3, "", 0); /* it is allowable for this to return NULL (as happens on FreeBSD) */ if (s && strcmp(s, "foo") != 0) { printf(__location__ ": Failed memmem\n"); return false; } s = (char *)memmem("foo", 4, "o", 1); if (strcmp(s, "oo") != 0) { printf(__location__ ": Failed memmem\n"); return false; } s = (char *)memmem("foobarfodx", 11, "fod", 3); if (strcmp(s, "fodx") != 0) { printf(__location__ ": Failed memmem\n"); return false; } printf("success: memmem\n"); return true; } static bool test_closefrom(void) { int i, fd; for (i=0; i<100; i++) { fd = dup(0); if (fd == -1) { perror("dup failed"); return false; } /* 1000 is just an arbitrarily chosen upper bound */ if (fd >= 1000) { printf("fd=%d\n", fd); return false; } } closefrom(3); for (i=3; i<=fd; i++) { off_t off; off = lseek(i, 0, SEEK_CUR); if ((off != (off_t)-1) || (errno != EBADF)) { printf("fd %d not closed\n", i); return false; } } return true; } bool torture_local_replace(struct torture_context *ctx) { bool ret = true; ret &= test_ftruncate(); ret &= test_strlcpy(); ret &= test_strlcat(); ret &= test_mktime(); ret &= 
test_initgroups(); ret &= test_memmove(); ret &= test_strdup(); ret &= test_setlinebuf(); ret &= test_vsyslog(); ret &= test_timegm(); ret &= test_setenv(); ret &= test_strndup(); ret &= test_strnlen(); ret &= test_waitpid(); ret &= test_seteuid(); ret &= test_setegid(); ret &= test_asprintf(); ret &= test_snprintf(); ret &= test_vasprintf(); ret &= test_vsnprintf(); ret &= test_opendir(); ret &= test_readdir(); ret &= test_telldir(); ret &= test_seekdir(); ret &= test_dlopen(); ret &= test_chroot(); ret &= test_bzero(); ret &= test_strerror(); ret &= test_errno(); ret &= test_mkdtemp(); ret &= test_mkstemp(); ret &= test_pread(); ret &= test_pwrite(); ret &= test_inet_ntoa(); ret &= test_strtoll(); ret &= test_strtoull(); ret &= test_va_copy(); ret &= test_FUNCTION(); ret &= test_MIN(); ret &= test_MAX(); ret &= test_socketpair(); ret &= test_strptime(); ret &= test_getifaddrs(); ret &= test_utime(); ret &= test_utimes(); ret &= test_memmem(); ret &= test_closefrom(); return ret; } tdb-1.4.2/lib/replace/timegm.c0000660000000000000000000000476412406075657016061 0ustar rootroot00000000000000/* * Copyright (c) 1997 Kungliga Tekniska Högskolan * (Royal Institute of Technology, Stockholm, Sweden). * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Institute nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* adapted for Samba4 by Andrew Tridgell */ #include "replace.h" #include "system/time.h" static int is_leap(unsigned y) { y += 1900; return (y % 4) == 0 && ((y % 100) != 0 || (y % 400) == 0); } time_t rep_timegm(struct tm *tm) { static const unsigned ndays[2][12] ={ {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}, {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}}; time_t res = 0; unsigned i; if (tm->tm_mon > 12 || tm->tm_mon < 0 || tm->tm_mday > 31 || tm->tm_min > 60 || tm->tm_sec > 60 || tm->tm_hour > 24) { /* invalid tm structure */ return 0; } for (i = 70; i < tm->tm_year; ++i) res += is_leap(i) ? 
366 : 365; for (i = 0; i < tm->tm_mon; ++i) res += ndays[is_leap(tm->tm_year)][i]; res += tm->tm_mday - 1; res *= 24; res += tm->tm_hour; res *= 60; res += tm->tm_min; res *= 60; res += tm->tm_sec; return res; } tdb-1.4.2/lib/replace/win32_replace.h0000660000000000000000000001046412406075657017233 0ustar rootroot00000000000000#ifndef _WIN32_REPLACE_H #define _WIN32_REPLACE_H #ifdef HAVE_WINSOCK2_H #include #endif #ifdef HAVE_WS2TCPIP_H #include #endif #ifdef HAVE_WINDOWS_H #include #endif /* Map BSD Socket errorcodes to the WSA errorcodes (if possible) */ #define EAFNOSUPPORT WSAEAFNOSUPPORT #define ECONNREFUSED WSAECONNREFUSED #define EINPROGRESS WSAEINPROGRESS #define EMSGSIZE WSAEMSGSIZE #define ENOBUFS WSAENOBUFS #define ENOTSOCK WSAENOTSOCK #define ENETUNREACH WSAENETUNREACH #define ENOPROTOOPT WSAENOPROTOOPT #define ENOTCONN WSAENOTCONN #define ENOTSUP 134 /* We undefine the following constants due to conflicts with the w32api headers * and the Windows Platform SDK/DDK. */ #undef interface #undef ERROR_INVALID_PARAMETER #undef ERROR_INSUFFICIENT_BUFFER #undef ERROR_INVALID_DATATYPE #undef FILE_GENERIC_READ #undef FILE_GENERIC_WRITE #undef FILE_GENERIC_EXECUTE #undef FILE_ATTRIBUTE_READONLY #undef FILE_ATTRIBUTE_HIDDEN #undef FILE_ATTRIBUTE_SYSTEM #undef FILE_ATTRIBUTE_DIRECTORY #undef FILE_ATTRIBUTE_ARCHIVE #undef FILE_ATTRIBUTE_DEVICE #undef FILE_ATTRIBUTE_NORMAL #undef FILE_ATTRIBUTE_TEMPORARY #undef FILE_ATTRIBUTE_REPARSE_POINT #undef FILE_ATTRIBUTE_COMPRESSED #undef FILE_ATTRIBUTE_OFFLINE #undef FILE_ATTRIBUTE_ENCRYPTED #undef FILE_FLAG_WRITE_THROUGH #undef FILE_FLAG_NO_BUFFERING #undef FILE_FLAG_RANDOM_ACCESS #undef FILE_FLAG_SEQUENTIAL_SCAN #undef FILE_FLAG_DELETE_ON_CLOSE #undef FILE_FLAG_BACKUP_SEMANTICS #undef FILE_FLAG_POSIX_SEMANTICS #undef FILE_TYPE_DISK #undef FILE_TYPE_UNKNOWN #undef FILE_CASE_SENSITIVE_SEARCH #undef FILE_CASE_PRESERVED_NAMES #undef FILE_UNICODE_ON_DISK #undef FILE_PERSISTENT_ACLS #undef FILE_FILE_COMPRESSION #undef FILE_VOLUME_QUOTAS #undef FILE_VOLUME_IS_COMPRESSED #undef FILE_NOTIFY_CHANGE_FILE_NAME #undef FILE_NOTIFY_CHANGE_DIR_NAME #undef FILE_NOTIFY_CHANGE_ATTRIBUTES #undef FILE_NOTIFY_CHANGE_SIZE #undef FILE_NOTIFY_CHANGE_LAST_WRITE #undef FILE_NOTIFY_CHANGE_LAST_ACCESS #undef FILE_NOTIFY_CHANGE_CREATION #undef FILE_NOTIFY_CHANGE_EA #undef FILE_NOTIFY_CHANGE_SECURITY #undef FILE_NOTIFY_CHANGE_STREAM_NAME #undef FILE_NOTIFY_CHANGE_STREAM_SIZE #undef FILE_NOTIFY_CHANGE_STREAM_WRITE #undef FILE_NOTIFY_CHANGE_NAME #undef PRINTER_ATTRIBUTE_QUEUED #undef PRINTER_ATTRIBUTE_DIRECT #undef PRINTER_ATTRIBUTE_DEFAULT #undef PRINTER_ATTRIBUTE_SHARED #undef PRINTER_ATTRIBUTE_NETWORK #undef PRINTER_ATTRIBUTE_HIDDEN #undef PRINTER_ATTRIBUTE_LOCAL #undef PRINTER_ATTRIBUTE_ENABLE_DEVQ #undef PRINTER_ATTRIBUTE_KEEPPRINTEDJOBS #undef PRINTER_ATTRIBUTE_DO_COMPLETE_FIRST #undef PRINTER_ATTRIBUTE_WORK_OFFLINE #undef PRINTER_ATTRIBUTE_ENABLE_BIDI #undef PRINTER_ATTRIBUTE_RAW_ONLY #undef PRINTER_ATTRIBUTE_PUBLISHED #undef PRINTER_ENUM_DEFAULT #undef PRINTER_ENUM_LOCAL #undef PRINTER_ENUM_CONNECTIONS #undef PRINTER_ENUM_FAVORITE #undef PRINTER_ENUM_NAME #undef PRINTER_ENUM_REMOTE #undef PRINTER_ENUM_SHARED #undef PRINTER_ENUM_NETWORK #undef PRINTER_ENUM_EXPAND #undef PRINTER_ENUM_CONTAINER #undef PRINTER_ENUM_ICON1 #undef PRINTER_ENUM_ICON2 #undef PRINTER_ENUM_ICON3 #undef PRINTER_ENUM_ICON4 #undef PRINTER_ENUM_ICON5 #undef PRINTER_ENUM_ICON6 #undef PRINTER_ENUM_ICON7 #undef PRINTER_ENUM_ICON8 #undef PRINTER_STATUS_PAUSED #undef PRINTER_STATUS_ERROR #undef 
PRINTER_STATUS_PENDING_DELETION #undef PRINTER_STATUS_PAPER_JAM #undef PRINTER_STATUS_PAPER_OUT #undef PRINTER_STATUS_MANUAL_FEED #undef PRINTER_STATUS_PAPER_PROBLEM #undef PRINTER_STATUS_OFFLINE #undef PRINTER_STATUS_IO_ACTIVE #undef PRINTER_STATUS_BUSY #undef PRINTER_STATUS_PRINTING #undef PRINTER_STATUS_OUTPUT_BIN_FULL #undef PRINTER_STATUS_NOT_AVAILABLE #undef PRINTER_STATUS_WAITING #undef PRINTER_STATUS_PROCESSING #undef PRINTER_STATUS_INITIALIZING #undef PRINTER_STATUS_WARMING_UP #undef PRINTER_STATUS_TONER_LOW #undef PRINTER_STATUS_NO_TONER #undef PRINTER_STATUS_PAGE_PUNT #undef PRINTER_STATUS_USER_INTERVENTION #undef PRINTER_STATUS_OUT_OF_MEMORY #undef PRINTER_STATUS_DOOR_OPEN #undef PRINTER_STATUS_SERVER_UNKNOWN #undef PRINTER_STATUS_POWER_SAVE #undef DWORD #undef HKEY_CLASSES_ROOT #undef HKEY_CURRENT_USER #undef HKEY_LOCAL_MACHINE #undef HKEY_USERS #undef HKEY_PERFORMANCE_DATA #undef HKEY_CURRENT_CONFIG #undef HKEY_DYN_DATA #undef REG_DWORD #undef REG_QWORD #undef SERVICE_STATE_ALL #undef SE_GROUP_MANDATORY #undef SE_GROUP_ENABLED_BY_DEFAULT #undef SE_GROUP_ENABLED #endif /* _WIN32_REPLACE_H */ tdb-1.4.2/lib/replace/wscript0000660000000000000000000011667613527011454016045 0ustar rootroot00000000000000#!/usr/bin/env python APPNAME = 'libreplace' VERSION = '1.2.1' import sys import os # find the buildtools directory top = '.' while not os.path.exists(top+'/buildtools') and len(top.split('/')) < 5: top = top + '/..' sys.path.insert(0, top + '/buildtools/wafsamba') out = 'bin' import wafsamba from wafsamba import samba_dist from waflib import Options, Utils, Logs, Context samba_dist.DIST_DIRS('lib/replace buildtools:buildtools third_party/waf:third_party/waf') def options(opt): opt.BUILTIN_DEFAULT('NONE') opt.PRIVATE_EXTENSION_DEFAULT('') opt.RECURSE('buildtools/wafsamba') @Utils.run_once def configure(conf): conf.RECURSE('buildtools/wafsamba') conf.env.standalone_replace = conf.IN_LAUNCH_DIR() conf.DEFINE('HAVE_LIBREPLACE', 1) conf.DEFINE('LIBREPLACE_NETWORK_CHECKS', 1) conf.CHECK_HEADERS('linux/types.h crypt.h locale.h acl/libacl.h compat.h') conf.CHECK_HEADERS('acl/libacl.h attr/xattr.h compat.h ctype.h dustat.h') conf.CHECK_HEADERS('fcntl.h fnmatch.h glob.h history.h krb5.h langinfo.h') conf.CHECK_HEADERS('libaio.h locale.h ndir.h pwd.h') conf.CHECK_HEADERS('shadow.h sys/acl.h') conf.CHECK_HEADERS('sys/attributes.h attr/attributes.h sys/capability.h sys/dir.h sys/epoll.h') conf.CHECK_HEADERS('port.h') conf.CHECK_HEADERS('sys/fcntl.h sys/filio.h sys/filsys.h sys/fs/s5param.h') conf.CHECK_HEADERS('sys/id.h sys/ioctl.h sys/ipc.h sys/mman.h sys/mode.h sys/ndir.h sys/priv.h') conf.CHECK_HEADERS('sys/resource.h sys/security.h sys/shm.h sys/statfs.h sys/statvfs.h sys/termio.h') conf.CHECK_HEADERS('sys/vfs.h sys/xattr.h termio.h termios.h sys/file.h') conf.CHECK_HEADERS('sys/ucontext.h sys/wait.h sys/stat.h') if not conf.CHECK_DECLS('malloc', headers='stdlib.h'): conf.CHECK_HEADERS('malloc.h') conf.CHECK_HEADERS('grp.h') conf.CHECK_HEADERS('sys/select.h setjmp.h utime.h sys/syslog.h syslog.h') conf.CHECK_HEADERS('stdarg.h vararg.h sys/mount.h mntent.h') conf.CHECK_HEADERS('stropts.h unix.h string.h strings.h sys/param.h limits.h') conf.CHECK_HEADERS('''sys/socket.h netinet/in.h netdb.h arpa/inet.h netinet/in_systm.h netinet/ip.h netinet/tcp.h netinet/in_ip.h sys/sockio.h sys/un.h''', together=True) conf.CHECK_HEADERS('sys/uio.h ifaddrs.h direct.h dirent.h') conf.CHECK_HEADERS('windows.h winsock2.h ws2tcpip.h') conf.CHECK_HEADERS('errno.h') conf.CHECK_HEADERS('getopt.h iconv.h') 
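    # Editor's note (sketch of the generated output, not literal code):
    # every header that a CHECK_HEADERS() probe finds becomes a
    # HAVE_<NAME>_H define in config.h, which replace.h and the system/
    # wrapper headers then key off, roughly:
    #
    #   #ifdef HAVE_GETOPT_H
    #   #include <getopt.h>
    #   #endif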
conf.CHECK_HEADERS('memory.h nss.h sasl/sasl.h') conf.CHECK_FUNCS_IN('inotify_init', 'inotify', checklibc=True, headers='sys/inotify.h') conf.CHECK_HEADERS('security/pam_appl.h zlib.h asm/unistd.h') conf.CHECK_HEADERS('aio.h sys/unistd.h alloca.h float.h') conf.SET_TARGET_TYPE('tirpc', 'EMPTY') if conf.CHECK_CODE( '\n#ifndef _TIRPC_RPC_H\n#error "no tirpc headers in system path"\n#endif\n', 'HAVE_RPC_RPC_HEADERS', headers=['rpc/rpc.h', 'rpc/nettype.h'], msg='Checking for tirpc rpc headers in default system path'): if conf.CONFIG_SET('HAVE_RPC_RPC_H'): conf.undefine('HAVE_RPC_RPC_H') if not conf.CONFIG_SET('HAVE_RPC_RPC_H'): if conf.CHECK_CFG(package='libtirpc', args='--cflags --libs', msg='Checking for libtirpc headers', uselib_store='TIRPC'): conf.CHECK_HEADERS('rpc/rpc.h rpc/nettype.h', lib='tirpc', together=True) conf.SET_TARGET_TYPE('tirpc', 'SYSLIB') if not conf.CONFIG_SET('HAVE_RPC_RPC_H'): if conf.CHECK_CFG(package='libntirpc', args='--cflags', msg='Checking for libntirpc headers', uselib_store='TIRPC'): conf.CHECK_HEADERS('rpc/rpc.h rpc/nettype.h', lib='tirpc', together=True) conf.SET_TARGET_TYPE('tirpc', 'SYSLIB') if not conf.CONFIG_SET('HAVE_RPC_RPC_H'): Logs.warn('No rpc/rpc.h header found, tirpc or libntirpc missing?') conf.SET_TARGET_TYPE('nsl', 'EMPTY') conf.CHECK_HEADERS('rpc/rpc.h rpcsvc/yp_prot.h', lib='tirpc') if not conf.CONFIG_SET('HAVE_RPCSVC_YP_PROT_H'): if conf.CHECK_CFG(package='libnsl', args='--cflags --libs', msg='Checking for libnsl', uselib_store='NSL'): conf.SET_TARGET_TYPE('nsl', 'SYSLIB') conf.CHECK_HEADERS('rpc/rpc.h rpcsvc/yp_prot.h', lib='tirpc nsl') else: conf.SET_TARGET_TYPE('nsl', 'SYSLIB') conf.CHECK_HEADERS('rpcsvc/nis.h rpcsvc/ypclnt.h', lib='tirpc nsl') conf.CHECK_HEADERS('sys/sysctl.h') conf.CHECK_HEADERS('sys/fileio.h sys/filesys.h sys/dustat.h sys/sysmacros.h') conf.CHECK_HEADERS('xfs/libxfs.h netgroup.h') conf.CHECK_HEADERS('valgrind.h valgrind/valgrind.h') conf.CHECK_HEADERS('valgrind/memcheck.h valgrind/helgrind.h') conf.CHECK_HEADERS('nss_common.h nsswitch.h ns_api.h') conf.CHECK_HEADERS('sys/extattr.h sys/ea.h sys/proplist.h sys/cdefs.h') conf.CHECK_HEADERS('utmp.h utmpx.h lastlog.h') conf.CHECK_HEADERS('syscall.h sys/syscall.h inttypes.h') conf.CHECK_HEADERS('sys/atomic.h stdatomic.h') conf.CHECK_HEADERS('libgen.h') if conf.CHECK_CFLAGS('-Wno-format-truncation'): conf.define('HAVE_WNO_FORMAT_TRUNCATION', '1') if conf.CHECK_CFLAGS('-Wno-unused-function'): conf.define('HAVE_WNO_UNUSED_FUNCTION', '1') if conf.CHECK_CFLAGS('-Wno-strict-overflow'): conf.define('HAVE_WNO_STRICT_OVERFLOW', '1') # Check for process set name support conf.CHECK_CODE(''' #include int main(void) { prctl(0); return 0; } ''', 'HAVE_PRCTL', headers='sys/prctl.h', msg='Checking for prctl syscall') conf.CHECK_CODE(''' #include #ifdef HAVE_FCNTL_H #include #endif int main(void) { int fd = open("/dev/null", O_DIRECT); } ''', define='HAVE_OPEN_O_DIRECT', addmain=False, msg='Checking for O_DIRECT flag to open(2)') conf.CHECK_TYPES('"long long" intptr_t uintptr_t ptrdiff_t comparison_fn_t') conf.CHECK_TYPE('_Bool', define='HAVE__Bool') conf.CHECK_TYPE('bool', define='HAVE_BOOL') conf.CHECK_TYPE('int8_t', 'char') conf.CHECK_TYPE('uint8_t', 'unsigned char') conf.CHECK_TYPE('int16_t', 'short') conf.CHECK_TYPE('uint16_t', 'unsigned short') conf.CHECK_TYPE('int32_t', 'int') conf.CHECK_TYPE('uint32_t', 'unsigned') conf.CHECK_TYPE('int64_t', 'long long') conf.CHECK_TYPE('uint64_t', 'unsigned long long') conf.CHECK_TYPE('size_t', 'unsigned int') conf.CHECK_TYPE('ssize_t', 'int') 
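    # Editor's note: the second argument to the CHECK_TYPE() calls above
    # and below is the fallback -- when a type is missing, wafsamba emits
    # something like
    #
    #   #define ssize_t int
    #
    # into config.h so the rest of libreplace can use it unconditionally.
    # (Sketch under that assumption, not the literal generated text.)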
conf.CHECK_TYPE('ino_t', 'unsigned') conf.CHECK_TYPE('loff_t', 'off_t') conf.CHECK_TYPE('offset_t', 'loff_t') conf.CHECK_TYPE('volatile int', define='HAVE_VOLATILE') conf.CHECK_TYPE('uint_t', 'unsigned int') conf.CHECK_TYPE('blksize_t', 'long', headers='sys/types.h sys/stat.h unistd.h') conf.CHECK_TYPE('blkcnt_t', 'long', headers='sys/types.h sys/stat.h unistd.h') conf.CHECK_SIZEOF('bool char int "long long" long short size_t ssize_t') conf.CHECK_SIZEOF('int8_t uint8_t int16_t uint16_t int32_t uint32_t int64_t uint64_t') conf.CHECK_SIZEOF('void*', define='SIZEOF_VOID_P') conf.CHECK_SIZEOF('off_t dev_t ino_t time_t') conf.CHECK_TYPES('socklen_t', headers='sys/socket.h') conf.CHECK_TYPE_IN('struct ifaddrs', 'ifaddrs.h') conf.CHECK_TYPE_IN('struct addrinfo', 'netdb.h') conf.CHECK_TYPE_IN('struct sockaddr', 'sys/socket.h') conf.CHECK_CODE('struct sockaddr_in6 x', define='HAVE_STRUCT_SOCKADDR_IN6', headers='sys/socket.h netdb.h netinet/in.h') conf.CHECK_TYPE_IN('struct sockaddr_storage', 'sys/socket.h') conf.CHECK_TYPE_IN('sa_family_t', 'sys/socket.h') conf.CHECK_TYPE_IN('sig_atomic_t', 'signal.h', define='HAVE_SIG_ATOMIC_T_TYPE') conf.CHECK_FUNCS('sigsetmask siggetmask sigprocmask sigblock sigaction sigset') conf.CHECK_FUNCS_IN('''inet_ntoa inet_aton inet_ntop inet_pton connect gethostbyname getaddrinfo getnameinfo freeaddrinfo gai_strerror socketpair''', 'socket nsl', checklibc=True, headers='sys/socket.h netinet/in.h arpa/inet.h netdb.h') conf.CHECK_FUNCS('memset_s memset_explicit') conf.CHECK_CODE(''' #include int main(void) { char buf[] = "This is some content"; memset(buf, '\0', sizeof(buf)); __asm__ volatile("" : : "g"(&buf) : "memory"); return 0; } ''', define='HAVE_GCC_VOLATILE_MEMORY_PROTECTION', addmain=False, msg='Checking for volatile memory protection', local_include=False) # Some old Linux systems have broken header files and # miss the IPV6_V6ONLY define in netinet/in.h, # but have it in linux/in6.h. # We can't include both files so we just check if the value # if defined and do the replacement in system/network.h if not conf.CHECK_VARIABLE('IPV6_V6ONLY', headers='sys/socket.h netdb.h netinet/in.h'): conf.CHECK_CODE(''' #include #if (IPV6_V6ONLY != 26) #error no IPV6_V6ONLY support on linux #endif int main(void) { return IPV6_V6ONLY; } ''', define='HAVE_LINUX_IPV6_V6ONLY_26', addmain=False, msg='Checking for IPV6_V6ONLY in linux/in6.h', local_include=False) conf.CHECK_CODE(''' struct sockaddr_storage sa_store; struct addrinfo *ai = NULL; struct in6_addr in6addr; int idx = if_nametoindex("iface1"); int s = socket(AF_INET6, SOCK_STREAM, 0); int ret = getaddrinfo(NULL, NULL, NULL, &ai); if (ret != 0) { const char *es = gai_strerror(ret); } freeaddrinfo(ai); { int val = 1; #ifdef HAVE_LINUX_IPV6_V6ONLY_26 #define IPV6_V6ONLY 26 #endif ret = setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, (const void *)&val, sizeof(val)); } ''', define='HAVE_IPV6', lib='nsl socket', headers='sys/socket.h netdb.h netinet/in.h net/if.h') if conf.CONFIG_SET('HAVE_SYS_UCONTEXT_H') and conf.CONFIG_SET('HAVE_SIGNAL_H'): conf.CHECK_CODE(''' ucontext_t uc; sigaddset(&uc.uc_sigmask, SIGUSR1); ''', 'HAVE_UCONTEXT_T', msg="Checking whether we have ucontext_t", headers='signal.h sys/ucontext.h') # Check for atomic builtins. 
*/ conf.CHECK_CODE(''' int i; (void)__sync_fetch_and_add(&i, 1); ''', 'HAVE___SYNC_FETCH_AND_ADD', msg='Checking for __sync_fetch_and_add compiler builtin') conf.CHECK_CODE(''' int32_t i; atomic_add_32(&i, 1); ''', 'HAVE_ATOMIC_ADD_32', headers='stdint.h sys/atomic.h', msg='Checking for atomic_add_32 compiler builtin') # Check for thread fence. */ tf = conf.CHECK_CODE('atomic_thread_fence(memory_order_seq_cst);', 'HAVE_ATOMIC_THREAD_FENCE', headers='stdatomic.h', msg='Checking for atomic_thread_fence(memory_order_seq_cst) in stdatomic.h') if not tf: tf = conf.CHECK_CODE('__atomic_thread_fence(__ATOMIC_SEQ_CST);', 'HAVE___ATOMIC_THREAD_FENCE', msg='Checking for __atomic_thread_fence(__ATOMIC_SEQ_CST)') if not tf: # __sync_synchronize() is available since 2005 in gcc. tf = conf.CHECK_CODE('__sync_synchronize();', 'HAVE___SYNC_SYNCHRONIZE', msg='Checking for __sync_synchronize') if tf: conf.DEFINE('HAVE_ATOMIC_THREAD_FENCE_SUPPORT', 1) conf.CHECK_CODE(''' #define FALL_THROUGH __attribute__((fallthrough)) enum direction_e { UP = 0, DOWN, }; int main(void) { enum direction_e key = UP; int i = 10; int j = 0; switch (key) { case UP: i = 5; FALL_THROUGH; case DOWN: j = i * 2; break; default: break; } if (j < i) { return 1; } return 0; } ''', 'HAVE_FALLTHROUGH_ATTRIBUTE', addmain=False, strict=True, cflags=['-Werror=missing-declarations'], msg='Checking for fallthrough attribute') # these may be builtins, so we need the link=False strategy conf.CHECK_FUNCS('strdup memmem printf memset memcpy memmove strcpy strncpy bzero', link=False) # See https://bugzilla.samba.org/show_bug.cgi?id=1097 # # Ported in from autoconf where it was added with this commit: # commit 804cfb20a067b4b687089dc72a8271b3abf20f31 # Author: Simo Sorce # Date: Wed Aug 25 14:24:16 2004 +0000 # r2070: Let's try to overload srnlen and strndup for AIX where they are natly broken. host_os = sys.platform if host_os.rfind('aix') > -1: conf.DEFINE('BROKEN_STRNLEN', 1) conf.DEFINE('BROKEN_STRNDUP', 1) conf.CHECK_FUNCS('shl_load shl_unload shl_findsym') conf.CHECK_FUNCS('pipe strftime srandom random srand rand usleep setbuffer') conf.CHECK_FUNCS('lstat getpgrp utime utimes setuid seteuid setreuid setresuid setgid setegid') conf.CHECK_FUNCS('setregid setresgid chroot strerror vsyslog setlinebuf mktime') conf.CHECK_FUNCS('ftruncate chsize rename waitpid wait4') conf.CHECK_FUNCS('initgroups pread pwrite strndup strcasestr strsep') conf.CHECK_FUNCS('strtok_r mkdtemp dup2 dprintf vdprintf isatty chown lchown') conf.CHECK_FUNCS('link readlink symlink realpath snprintf vsnprintf') conf.CHECK_FUNCS('asprintf vasprintf setenv unsetenv strnlen strtoull __strtoull') conf.CHECK_FUNCS('strtouq strtoll __strtoll strtoq memalign posix_memalign') conf.CHECK_FUNCS('fmemopen') if conf.CONFIG_SET('HAVE_MEMALIGN'): conf.CHECK_DECLS('memalign', headers='malloc.h') # glibc up to 2.3.6 had dangerously broken posix_fallocate(). DON'T USE IT. 
if conf.CHECK_CODE(''' #define _XOPEN_SOURCE 600 #include #if defined(__GLIBC__) && ((__GLIBC__ < 2) || (__GLIBC__ == 2 && __GLIBC_MINOR__ < 4)) #error probably broken posix_fallocate #endif ''', '_POSIX_FALLOCATE_CAPABLE_LIBC', msg='Checking for posix_fallocate-capable libc'): conf.CHECK_FUNCS('posix_fallocate') conf.CHECK_FUNCS('prctl dirname basename') strlcpy_in_bsd = False # libbsd on some platforms provides strlcpy and strlcat if not conf.CHECK_FUNCS('strlcpy strlcat'): if conf.CHECK_FUNCS_IN('strlcpy strlcat', 'bsd', headers='bsd/string.h', checklibc=True): strlcpy_in_bsd = True if not conf.CHECK_FUNCS('getpeereid'): conf.CHECK_FUNCS_IN('getpeereid', 'bsd', headers='sys/types.h bsd/unistd.h') if not conf.CHECK_FUNCS_IN('setproctitle', 'setproctitle', headers='setproctitle.h'): conf.CHECK_FUNCS_IN('setproctitle', 'bsd', headers='sys/types.h bsd/unistd.h') if not conf.CHECK_FUNCS('setproctitle_init'): conf.CHECK_FUNCS_IN('setproctitle_init', 'bsd', headers='sys/types.h bsd/unistd.h') if not conf.CHECK_FUNCS('closefrom'): conf.CHECK_FUNCS_IN('closefrom', 'bsd', headers='bsd/unistd.h') conf.CHECK_CODE(''' struct ucred cred; socklen_t cred_len; int ret = getsockopt(0, SOL_SOCKET, SO_PEERCRED, &cred, &cred_len);''', 'HAVE_PEERCRED', msg="Checking whether we can use SO_PEERCRED to get socket credentials", headers='sys/types.h sys/socket.h') #Some OS (ie. freebsd) return EINVAL if the convertion could not be done, it's not what we expect #Let's detect those cases if conf.CONFIG_SET('HAVE_STRTOLL'): conf.CHECK_CODE(''' long long nb = strtoll("Text", NULL, 0); if (errno == EINVAL) { return 0; } else { return 1; } ''', msg="Checking correct behavior of strtoll", headers = 'errno.h', execute = True, define = 'HAVE_BSD_STRTOLL', ) conf.CHECK_FUNCS('if_nametoindex strerror_r') conf.CHECK_FUNCS('getdirentries getdents syslog') conf.CHECK_FUNCS('gai_strerror get_current_dir_name') conf.CHECK_FUNCS('timegm getifaddrs freeifaddrs mmap setgroups syscall setsid') conf.CHECK_FUNCS('getgrent_r getgrgid_r getgrnam_r getgrouplist getpagesize') conf.CHECK_FUNCS('getpwent_r getpwnam_r getpwuid_r epoll_create') conf.CHECK_FUNCS('port_create') conf.CHECK_FUNCS('getprogname') conf.SET_TARGET_TYPE('attr', 'EMPTY') xattr_headers='sys/attributes.h attr/xattr.h sys/xattr.h' # default to 1, we set it to 0 if we don't find any EA implementation below: conf.DEFINE('HAVE_XATTR_SUPPORT', 1) if conf.CHECK_FUNCS_IN('getxattr', 'attr', checklibc=True, headers=xattr_headers): conf.DEFINE('HAVE_XATTR_XATTR', 1) # Darwin has extra options to xattr-family functions conf.CHECK_CODE('getxattr(NULL, NULL, NULL, 0, 0, 0)', headers=xattr_headers, local_include=False, define='XATTR_ADDITIONAL_OPTIONS', msg="Checking whether xattr interface takes additional options") elif conf.CHECK_FUNCS_IN('attr_listf', 'attr', checklibc=True, headers=xattr_headers): conf.DEFINE('HAVE_XATTR_ATTR', 1) elif conf.CHECK_FUNCS('extattr_list_fd'): conf.DEFINE('HAVE_XATTR_EXTATTR', 1) elif conf.CHECK_FUNCS('flistea'): conf.DEFINE('HAVE_XATTR_EA', 1) elif not conf.CHECK_FUNCS('attropen'): conf.DEFINE('HAVE_XATTR_SUPPORT', 0) conf.CHECK_FUNCS_IN('dlopen dlsym dlerror dlclose', 'dl', checklibc=True, headers='dlfcn.h dl.h') conf.CHECK_C_PROTOTYPE('dlopen', 'void *dlopen(const char* filename, unsigned int flags)', define='DLOPEN_TAKES_UNSIGNED_FLAGS', headers='dlfcn.h dl.h') if conf.CHECK_FUNCS_IN('fdatasync', 'rt', checklibc=True): # some systems are missing the declaration conf.CHECK_DECLS('fdatasync') if conf.CHECK_FUNCS_IN('clock_gettime', 'rt', 
checklibc=True): for c in ['CLOCK_MONOTONIC', 'CLOCK_PROCESS_CPUTIME_ID', 'CLOCK_REALTIME']: conf.CHECK_CODE(''' #if TIME_WITH_SYS_TIME # include # include #else # if HAVE_SYS_TIME_H # include # else # include # endif #endif clockid_t clk = %s''' % c, 'HAVE_%s' % c, msg='Checking whether the clock_gettime clock ID %s is available' % c) conf.CHECK_TYPE('struct timespec', headers='sys/time.h time.h') # these headers need to be tested as a group on freebsd conf.CHECK_HEADERS(headers='sys/socket.h net/if.h', together=True) conf.CHECK_HEADERS(headers='netinet/in.h arpa/nameser.h resolv.h', together=True) conf.CHECK_FUNCS_IN('res_search', 'resolv', checklibc=True, headers='netinet/in.h arpa/nameser.h resolv.h') # try to find libintl (if --without-gettext is not given) conf.env.intl_libs='' if not Options.options.disable_gettext: conf.CHECK_HEADERS('libintl.h') conf.CHECK_LIB('intl') conf.CHECK_DECLS('dgettext gettext bindtextdomain textdomain bind_textdomain_codeset', headers="libintl.h") # *textdomain functions are not strictly necessary conf.CHECK_FUNCS_IN('bindtextdomain textdomain bind_textdomain_codeset', '', checklibc=True, headers='libintl.h') # gettext and dgettext must exist # on some systems (the ones with glibc, those are in libc) if conf.CHECK_FUNCS_IN('dgettext gettext', '', checklibc=True, headers='libintl.h'): # save for dependency definitions conf.env.intl_libs='' # others (e.g. FreeBSD) have separate libintl elif conf.CHECK_FUNCS_IN('dgettext gettext', 'intl', checklibc=False, headers='libintl.h'): # save for dependency definitions conf.env.intl_libs='intl' # recheck with libintl conf.CHECK_FUNCS_IN('bindtextdomain textdomain bind_textdomain_codeset', 'intl', checklibc=False, headers='libintl.h') else: # Some hosts need lib iconv for linking with lib intl # So we try with flags just in case it helps. oldflags = list(conf.env['EXTRA_LDFLAGS']); conf.env['EXTRA_LDFLAGS'].extend(["-liconv"]) conf.CHECK_FUNCS_IN('dgettext gettext bindtextdomain textdomain bind_textdomain_codeset', 'intl', checklibc=False, headers='libintl.h') conf.env['EXTRA_LDFLAGS'] = oldflags if conf.env['HAVE_GETTEXT'] and conf.env['HAVE_DGETTEXT']: # save for dependency definitions conf.env.intl_libs='iconv intl' # did we find both prototypes and a library to link against? # if not, unset the detected values (see Bug #9911) if not (conf.env['HAVE_GETTEXT'] and conf.env['HAVE_DECL_GETTEXT']): conf.undefine('HAVE_GETTEXT') conf.undefine('HAVE_DECL_GETTEXT') if not (conf.env['HAVE_DGETTEXT'] and conf.env['HAVE_DECL_DGETTEXT']): conf.undefine('HAVE_DGETTEXT') conf.undefine('HAVE_DECL_DGETTEXT') conf.CHECK_FUNCS_IN('pthread_create', 'pthread', checklibc=True, headers='pthread.h') PTHREAD_CFLAGS='error' PTHREAD_LDFLAGS='error' if PTHREAD_LDFLAGS == 'error': if conf.CHECK_FUNCS_IN('pthread_attr_init', 'pthread'): PTHREAD_CFLAGS='-D_REENTRANT -D_POSIX_PTHREAD_SEMANTICS' PTHREAD_LDFLAGS='-lpthread' if PTHREAD_LDFLAGS == 'error': if conf.CHECK_FUNCS_IN('pthread_attr_init', 'pthreads'): PTHREAD_CFLAGS='-D_THREAD_SAFE' PTHREAD_LDFLAGS='-lpthreads' if PTHREAD_LDFLAGS == 'error': if conf.CHECK_FUNCS_IN('pthread_attr_init', 'c_r'): PTHREAD_CFLAGS='-D_THREAD_SAFE -pthread' PTHREAD_LDFLAGS='-pthread' if PTHREAD_LDFLAGS == 'error': if conf.CHECK_FUNCS('pthread_attr_init'): PTHREAD_CFLAGS='-D_REENTRANT' PTHREAD_LDFLAGS='-lpthread' # especially for HP-UX, where the CHECK_FUNC macro fails to test for # pthread_attr_init. On pthread_mutex_lock it works there... 
if PTHREAD_LDFLAGS == 'error': if conf.CHECK_FUNCS_IN('pthread_mutex_lock', 'pthread'): PTHREAD_CFLAGS='-D_REENTRANT' PTHREAD_LDFLAGS='-lpthread' if PTHREAD_CFLAGS != 'error' and PTHREAD_LDFLAGS != 'error': if conf.CONFIG_SET('replace_add_global_pthread'): conf.ADD_CFLAGS(PTHREAD_CFLAGS) conf.ADD_LDFLAGS(PTHREAD_LDFLAGS) conf.CHECK_HEADERS('pthread.h') conf.DEFINE('HAVE_PTHREAD', '1') if conf.CONFIG_SET('HAVE_PTHREAD'): conf.CHECK_FUNCS_IN('pthread_mutexattr_setrobust', 'pthread', checklibc=True, headers='pthread.h') if not conf.CONFIG_SET('HAVE_PTHREAD_MUTEXATTR_SETROBUST'): conf.CHECK_FUNCS_IN('pthread_mutexattr_setrobust_np', 'pthread', checklibc=True, headers='pthread.h') conf.CHECK_DECLS('PTHREAD_MUTEX_ROBUST', headers='pthread.h') if not conf.CONFIG_SET('HAVE_DECL_PTHREAD_MUTEX_ROBUST'): conf.CHECK_DECLS('PTHREAD_MUTEX_ROBUST_NP', headers='pthread.h') conf.CHECK_FUNCS_IN('pthread_mutex_consistent', 'pthread', checklibc=True, headers='pthread.h') if not conf.CONFIG_SET('HAVE_PTHREAD_MUTEX_CONSISTENT'): conf.CHECK_FUNCS_IN('pthread_mutex_consistent_np', 'pthread', checklibc=True, headers='pthread.h') if ((conf.CONFIG_SET('HAVE_PTHREAD_MUTEXATTR_SETROBUST') or conf.CONFIG_SET('HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP')) and (conf.CONFIG_SET('HAVE_DECL_PTHREAD_MUTEX_ROBUST') or conf.CONFIG_SET('HAVE_DECL_PTHREAD_MUTEX_ROBUST_NP')) and (conf.CONFIG_SET('HAVE_PTHREAD_MUTEX_CONSISTENT') or conf.CONFIG_SET('HAVE_PTHREAD_MUTEX_CONSISTENT_NP'))): conf.DEFINE('HAVE_ROBUST_MUTEXES', 1) # __thread is available since 2002 in gcc. conf.CHECK_CODE(''' __thread int tls; int main(void) { return 0; } ''', 'HAVE___THREAD', addmain=False, msg='Checking for __thread local storage') conf.CHECK_FUNCS_IN('crypt', 'crypt', checklibc=True) conf.CHECK_FUNCS_IN('crypt_r', 'crypt', checklibc=True) conf.CHECK_VARIABLE('rl_event_hook', define='HAVE_DECL_RL_EVENT_HOOK', always=True, headers='readline.h readline/readline.h readline/history.h') conf.CHECK_VARIABLE('program_invocation_short_name', headers='errno.h') conf.CHECK_DECLS('snprintf vsnprintf asprintf vasprintf') conf.CHECK_DECLS('errno', headers='errno.h', reverse=True) conf.CHECK_DECLS('EWOULDBLOCK', headers='errno.h') conf.CHECK_DECLS('environ', reverse=True, headers='unistd.h') conf.CHECK_DECLS('getgrent_r getpwent_r', reverse=True, headers='pwd.h grp.h') conf.CHECK_DECLS('pread pwrite setenv setresgid setresuid', reverse=True) if conf.CONFIG_SET('HAVE_EPOLL_CREATE') and conf.CONFIG_SET('HAVE_SYS_EPOLL_H'): conf.DEFINE('HAVE_EPOLL', 1) if conf.CONFIG_SET('HAVE_PORT_CREATE') and conf.CONFIG_SET('HAVE_PORT_H'): conf.DEFINE('HAVE_SOLARIS_PORTS', 1) if conf.CHECK_FUNCS('eventfd', headers='sys/eventfd.h'): conf.DEFINE('HAVE_EVENTFD', 1) conf.CHECK_HEADERS('poll.h') conf.CHECK_FUNCS('poll') conf.CHECK_FUNCS('strptime') conf.CHECK_DECLS('strptime', headers='time.h') conf.CHECK_CODE('''#define LIBREPLACE_CONFIGURE_TEST_STRPTIME #include "tests/strptime.c"''', define='HAVE_WORKING_STRPTIME', execute=True, addmain=False, msg='Checking for working strptime') conf.CHECK_C_PROTOTYPE('gettimeofday', 'int gettimeofday(struct timeval *tv, struct timezone *tz)', define='HAVE_GETTIMEOFDAY_TZ', headers='sys/time.h') conf.CHECK_C_PROTOTYPE('gettimeofday', 'int gettimeofday(struct timeval *tv, void *tz)', define='HAVE_GETTIMEOFDAY_TZ_VOID', headers='sys/time.h') conf.CHECK_CODE('#include "tests/snprintf.c"', define="HAVE_C99_VSNPRINTF", execute=True, addmain=False, msg="Checking for C99 vsnprintf") conf.CHECK_CODE('#include "tests/shared_mmap.c"', addmain=False, add_headers=False, 
execute=True, define='HAVE_SHARED_MMAP', msg="Checking for HAVE_SHARED_MMAP") conf.CHECK_CODE('#include "tests/shared_mremap.c"', addmain=False, add_headers=False, execute=True, define='HAVE_MREMAP', msg="Checking for HAVE_MREMAP") # OpenBSD (and I've heard HPUX) doesn't sync between mmap and write. # FIXME: Anything other than a 0 or 1 exit code should abort configure! conf.CHECK_CODE('#include "tests/incoherent_mmap.c"', addmain=False, add_headers=False, execute=True, define='HAVE_INCOHERENT_MMAP', msg="Checking for HAVE_INCOHERENT_MMAP") conf.SAMBA_BUILD_ENV() conf.CHECK_CODE(''' typedef struct {unsigned x;} FOOBAR; #define X_FOOBAR(x) ((FOOBAR) { x }) #define FOO_ONE X_FOOBAR(1) FOOBAR f = FOO_ONE; static const struct { FOOBAR y; } f2[] = { {FOO_ONE} }; static const FOOBAR f3[] = {FOO_ONE}; ''', define='HAVE_IMMEDIATE_STRUCTURES') conf.CHECK_CODE('mkdir("foo",0777)', define='HAVE_MKDIR_MODE', headers='sys/stat.h') conf.CHECK_STRUCTURE_MEMBER('struct stat', 'st_mtim.tv_nsec', define='HAVE_STAT_TV_NSEC', headers='sys/stat.h') # we need the st_rdev test under two names conf.CHECK_STRUCTURE_MEMBER('struct stat', 'st_rdev', define='HAVE_STRUCT_STAT_ST_RDEV', headers='sys/stat.h') conf.CHECK_STRUCTURE_MEMBER('struct stat', 'st_rdev', define='HAVE_ST_RDEV', headers='sys/stat.h') conf.CHECK_STRUCTURE_MEMBER('struct sockaddr_storage', 'ss_family', headers='sys/socket.h netinet/in.h') conf.CHECK_STRUCTURE_MEMBER('struct sockaddr_storage', '__ss_family', headers='sys/socket.h netinet/in.h') if conf.CHECK_STRUCTURE_MEMBER('struct sockaddr', 'sa_len', headers='sys/socket.h netinet/in.h', define='HAVE_SOCKADDR_SA_LEN'): # the old build system produced both defines conf.DEFINE('HAVE_STRUCT_SOCKADDR_SA_LEN', 1) conf.CHECK_STRUCTURE_MEMBER('struct sockaddr_in', 'sin_len', headers='sys/socket.h netinet/in.h', define='HAVE_SOCK_SIN_LEN') conf.CHECK_STRUCTURE_MEMBER('struct sockaddr_in6', 'sin6_len', headers='sys/socket.h netinet/in.h', define='HAVE_SOCK_SIN6_LEN') conf.CHECK_CODE('struct sockaddr_un sunaddr; sunaddr.sun_family = AF_UNIX;', define='HAVE_UNIXSOCKET', headers='sys/socket.h sys/un.h') conf.CHECK_CODE(''' struct stat st; char tpl[20]="/tmp/test.XXXXXX"; char tpl2[20]="/tmp/test.XXXXXX"; int fd = mkstemp(tpl); int fd2 = mkstemp(tpl2); if (fd == -1) { if (fd2 != -1) { unlink(tpl2); } exit(1); } if (fd2 == -1) exit(1); unlink(tpl); unlink(tpl2); if (fstat(fd, &st) != 0) exit(1); if ((st.st_mode & 0777) != 0600) exit(1); if (strcmp(tpl, "/tmp/test.XXXXXX") == 0) { exit(1); } if (strcmp(tpl, tpl2) == 0) { exit(1); } exit(0); ''', define='HAVE_SECURE_MKSTEMP', execute=True, mandatory=True) # lets see if we get a mandatory failure for this one # look for a method of finding the list of network interfaces for method in ['HAVE_IFACE_GETIFADDRS', 'HAVE_IFACE_AIX', 'HAVE_IFACE_IFCONF', 'HAVE_IFACE_IFREQ']: bsd_for_strlcpy = '' if strlcpy_in_bsd: bsd_for_strlcpy = ' bsd' if conf.CHECK_CODE(''' #define %s 1 #define NO_CONFIG_H 1 #define AUTOCONF_TEST 1 #include "replace.c" #include "inet_ntop.c" #include "snprintf.c" #include "getifaddrs.c" #define getifaddrs_test main #include "tests/getifaddrs.c" ''' % method, method, lib='nsl socket' + bsd_for_strlcpy, addmain=False, execute=True): break conf.RECURSE('system') conf.SAMBA_CONFIG_H() if conf.CHECK_FUNCS('strerror_r'): # Check if strerror_r is XSI-Compatable, the default GNU implementation # is not conf.CHECK_CODE('int strerror_r(int errnum, char *buf, size_t buflen);', 'STRERROR_R_XSI_NOT_GNU', headers='string.h', addmain=False, link=False, msg="Checking 
for XSI (rather than GNU) prototype for strerror_r") REPLACEMENT_FUNCTIONS = { 'replace.c': ['ftruncate', 'strlcpy', 'strlcat', 'mktime', 'initgroups', 'memmove', 'strdup', 'setlinebuf', 'vsyslog', 'strnlen', 'strndup', 'waitpid', 'seteuid', 'setegid', 'chroot', 'mkstemp', 'mkdtemp', 'pread', 'pwrite', 'strcasestr', 'strsep', 'strtok_r', 'strtoll', 'strtoull', 'setenv', 'unsetenv', 'utime', 'utimes', 'dup2', 'chown', 'link', 'readlink', 'symlink', 'lchown', 'realpath', 'memmem', 'vdprintf', 'dprintf', 'get_current_dir_name', 'strerror_r', 'clock_gettime', 'memset_s'], 'timegm.c': ['timegm'], # Note: C99_VSNPRINTF is not a function, but a special condition # for replacement 'snprintf.c': ['C99_VSNPRINTF', 'snprintf', 'vsnprintf', 'asprintf', 'vasprintf'], # Note: WORKING_STRPTIME is not a function, but a special condition # for replacement 'strptime.c': ['WORKING_STRPTIME', 'strptime'], } def build(bld): bld.RECURSE('buildtools/wafsamba') REPLACE_HOSTCC_SOURCE = '' for filename in REPLACEMENT_FUNCTIONS.keys(): for function in REPLACEMENT_FUNCTIONS[filename]: if not bld.CONFIG_SET('HAVE_%s' % function.upper()): REPLACE_HOSTCC_SOURCE += ' %s' % filename break extra_libs = '' if bld.CONFIG_SET('HAVE_LIBBSD'): extra_libs += ' bsd' bld.SAMBA_SUBSYSTEM('LIBREPLACE_HOSTCC', REPLACE_HOSTCC_SOURCE, use_hostcc=True, use_global_deps=False, cflags='-D_SAMBA_HOSTCC_', group='compiler_libraries', deps = extra_libs ) REPLACE_SOURCE = REPLACE_HOSTCC_SOURCE REPLACE_SOURCE += ' cwrap.c' if not bld.CONFIG_SET('HAVE_CRYPT'): REPLACE_SOURCE += ' crypt.c' if not bld.CONFIG_SET('HAVE_DLOPEN'): REPLACE_SOURCE += ' dlfcn.c' if not bld.CONFIG_SET('HAVE_POLL'): REPLACE_SOURCE += ' poll.c' if not bld.CONFIG_SET('HAVE_SOCKETPAIR'): REPLACE_SOURCE += ' socketpair.c' if not bld.CONFIG_SET('HAVE_CONNECT'): REPLACE_SOURCE += ' socket.c' if not bld.CONFIG_SET('HAVE_GETIFADDRS'): REPLACE_SOURCE += ' getifaddrs.c' if not bld.CONFIG_SET('HAVE_GETADDRINFO'): REPLACE_SOURCE += ' getaddrinfo.c' if not bld.CONFIG_SET('HAVE_INET_NTOA'): REPLACE_SOURCE += ' inet_ntoa.c' if not bld.CONFIG_SET('HAVE_INET_ATON'): REPLACE_SOURCE += ' inet_aton.c' if not bld.CONFIG_SET('HAVE_INET_NTOP'): REPLACE_SOURCE += ' inet_ntop.c' if not bld.CONFIG_SET('HAVE_INET_PTON'): REPLACE_SOURCE += ' inet_pton.c' if not bld.CONFIG_SET('HAVE_GETXATTR') or bld.CONFIG_SET('XATTR_ADDITIONAL_OPTIONS'): REPLACE_SOURCE += ' xattr.c' if not bld.CONFIG_SET('HAVE_CLOSEFROM'): REPLACE_SOURCE += ' closefrom.c' bld.SAMBA_LIBRARY('replace', source=REPLACE_SOURCE, group='base_libraries', # FIXME: Ideally symbols should be hidden here so they # don't appear in the global namespace when Samba # libraries are loaded, but this doesn't appear to work # at the moment: # hide_symbols=bld.BUILTIN_LIBRARY('replace'), private_library=True, deps='crypt dl nsl socket rt attr' + extra_libs) replace_test_cflags = '' if bld.CONFIG_SET('HAVE_WNO_FORMAT_TRUNCATION'): replace_test_cflags += " -Wno-format-truncation" bld.SAMBA_SUBSYSTEM('replace-test', source='''tests/testsuite.c tests/strptime.c tests/os2_delete.c tests/getifaddrs.c''', deps='replace', cflags=replace_test_cflags) bld.SAMBA_BINARY('replace_testsuite', source='tests/main.c', deps='replace replace-test', install=False) # build replacements for stdint.h and stdbool.h if needed bld.SAMBA_GENERATOR('replace_stdint_h', rule='cp ${SRC} ${TGT}', source='hdr_replace.h', target='stdint.h', enabled = not bld.CONFIG_SET('HAVE_STDINT_H')) bld.SAMBA_GENERATOR('replace_stdbool_h', rule='cp ${SRC} ${TGT}', source='hdr_replace.h', 
target='stdbool.h', enabled = not bld.CONFIG_SET('HAVE_STDBOOL_H')) bld.SAMBA_SUBSYSTEM('samba_intl', source='', use_global_deps=False, deps=bld.env.intl_libs) def testonly(ctx): '''run replace testsuite''' import samba_utils samba_utils.ADD_LD_LIBRARY_PATH('bin/shared') samba_utils.ADD_LD_LIBRARY_PATH('bin/shared/private') cmd = os.path.join(Context.g_module.out, 'replace_testsuite') ret = samba_utils.RUN_COMMAND(cmd) print("testsuite returned %d" % ret) sys.exit(ret) # WAF doesn't build the unit tests for this, maybe because they don't link with talloc? # This forces it def test(ctx): Options.commands.append('build') Options.commands.append('testonly') def dist(): '''makes a tarball for distribution''' samba_dist.dist() tdb-1.4.2/lib/replace/xattr.c0000660000000000000000000005102413444661620015722 0ustar rootroot00000000000000/* Unix SMB/CIFS implementation. replacement routines for xattr implementations Copyright (C) Jeremy Allison 1998-2005 Copyright (C) Timur Bakeyev 2005 Copyright (C) Bjoern Jacke 2006-2007 Copyright (C) Herb Lewis 2003 Copyright (C) Andrew Bartlett 2012 ** NOTE! The following LGPL license applies to the replace ** library. This does NOT imply that all of Samba is released ** under the LGPL This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #define UID_WRAPPER_NOT_REPLACE #include "replace.h" #include "system/filesys.h" #include "system/dir.h" /******** Solaris EA helper function prototypes ********/ #ifdef HAVE_ATTROPEN #define SOLARIS_ATTRMODE S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP static int solaris_write_xattr(int attrfd, const char *value, size_t size); static ssize_t solaris_read_xattr(int attrfd, void *value, size_t size); static ssize_t solaris_list_xattr(int attrdirfd, char *list, size_t size); static int solaris_unlinkat(int attrdirfd, const char *name); static int solaris_attropen(const char *path, const char *attrpath, int oflag, mode_t mode); static int solaris_openat(int fildes, const char *path, int oflag, mode_t mode); #endif /************************************************************************** Wrappers for extended attribute calls. Based on the Linux package with support for IRIX and (Net|Free)BSD also. Expand as other systems have them. 
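 A usage sketch (illustrative only; "user.note" is just an example
 attribute name): the wrappers keep the Linux convention that a
 zero-size query returns the attribute's size, so a typical fetch is
 two calls:

     ssize_t n = rep_getxattr(path, "user.note", NULL, 0);
     if (n >= 0) {
             char *buf = malloc(n);
             if (buf != NULL)
                     n = rep_getxattr(path, "user.note", buf, n);
     }

 That convention is why several branches below special-case size == 0.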
****************************************************************************/ ssize_t rep_getxattr (const char *path, const char *name, void *value, size_t size) { #if defined(HAVE_XATTR_XATTR) #ifndef XATTR_ADDITIONAL_OPTIONS return getxattr(path, name, value, size); #else /* So that we do not recursively call this function */ #undef getxattr int options = 0; return getxattr(path, name, value, size, 0, options); #endif #elif defined(HAVE_XATTR_EA) return getea(path, name, value, size); #elif defined(HAVE_XATTR_EXTATTR) ssize_t retval; int attrnamespace; const char *attrname; if (strncmp(name, "system.", 7) == 0) { attrnamespace = EXTATTR_NAMESPACE_SYSTEM; attrname = name + 7; } else if (strncmp(name, "user.", 5) == 0) { attrnamespace = EXTATTR_NAMESPACE_USER; attrname = name + 5; } else { errno = EINVAL; return -1; } /* * The BSD implementation has a nasty habit of silently truncating * the returned value to the size of the buffer, so we have to check * that the buffer is large enough to fit the returned value. */ if((retval=extattr_get_file(path, attrnamespace, attrname, NULL, 0)) >= 0) { if (size == 0) { return retval; } else if (retval > size) { errno = ERANGE; return -1; } if((retval=extattr_get_file(path, attrnamespace, attrname, value, size)) >= 0) return retval; } return -1; #elif defined(HAVE_XATTR_ATTR) int retval, flags = 0; int valuelength = (int)size; char *attrname = strchr(name,'.') + 1; if (strncmp(name, "system", 6) == 0) flags |= ATTR_ROOT; retval = attr_get(path, attrname, (char *)value, &valuelength, flags); if (size == 0 && retval == -1 && errno == E2BIG) { return valuelength; } return retval ? retval : valuelength; #elif defined(HAVE_ATTROPEN) ssize_t ret = -1; int attrfd = solaris_attropen(path, name, O_RDONLY, 0); if (attrfd >= 0) { ret = solaris_read_xattr(attrfd, value, size); close(attrfd); } return ret; #else errno = ENOSYS; return -1; #endif } ssize_t rep_fgetxattr (int filedes, const char *name, void *value, size_t size) { #if defined(HAVE_XATTR_XATTR) #ifndef XATTR_ADDITIONAL_OPTIONS return fgetxattr(filedes, name, value, size); #else /* So that we do not recursively call this function */ #undef fgetxattr int options = 0; return fgetxattr(filedes, name, value, size, 0, options); #endif #elif defined(HAVE_XATTR_EA) return fgetea(filedes, name, value, size); #elif defined(HAVE_XATTR_EXTATTR) ssize_t retval; int attrnamespace; const char *attrname; if (strncmp(name, "system.", 7) == 0) { attrnamespace = EXTATTR_NAMESPACE_SYSTEM; attrname = name + 7; } else if (strncmp(name, "user.", 5) == 0) { attrnamespace = EXTATTR_NAMESPACE_USER; attrname = name + 5; } else { errno = EINVAL; return -1; } if((retval=extattr_get_fd(filedes, attrnamespace, attrname, NULL, 0)) >= 0) { if (size == 0) { return retval; } else if (retval > size) { errno = ERANGE; return -1; } if((retval=extattr_get_fd(filedes, attrnamespace, attrname, value, size)) >= 0) return retval; } return -1; #elif defined(HAVE_XATTR_ATTR) int retval, flags = 0; int valuelength = (int)size; char *attrname = strchr(name,'.') + 1; if (strncmp(name, "system", 6) == 0) flags |= ATTR_ROOT; retval = attr_getf(filedes, attrname, (char *)value, &valuelength, flags); if (size == 0 && retval == -1 && errno == E2BIG) { return valuelength; } return retval ? 
retval : valuelength; #elif defined(HAVE_ATTROPEN) ssize_t ret = -1; int attrfd = solaris_openat(filedes, name, O_RDONLY|O_XATTR, 0); if (attrfd >= 0) { ret = solaris_read_xattr(attrfd, value, size); close(attrfd); } return ret; #else errno = ENOSYS; return -1; #endif } #if defined(HAVE_XATTR_EXTATTR) #define EXTATTR_PREFIX(s) (s), (sizeof((s))-1) static struct { int space; const char *name; size_t len; } extattr[] = { { EXTATTR_NAMESPACE_SYSTEM, EXTATTR_PREFIX("system.") }, { EXTATTR_NAMESPACE_USER, EXTATTR_PREFIX("user.") }, }; typedef union { const char *path; int filedes; } extattr_arg; static ssize_t bsd_attr_list (int type, extattr_arg arg, char *list, size_t size) { ssize_t list_size, total_size = 0; int i, t, len; char *buf; /* Iterate through extattr(2) namespaces */ for(t = 0; t < ARRAY_SIZE(extattr); t++) { if (t != EXTATTR_NAMESPACE_USER && geteuid() != 0) { /* ignore all but user namespace when we are not root, see bug 10247 */ continue; } switch(type) { case 0: list_size = extattr_list_file(arg.path, extattr[t].space, list, size); break; case 1: list_size = extattr_list_link(arg.path, extattr[t].space, list, size); break; case 2: list_size = extattr_list_fd(arg.filedes, extattr[t].space, list, size); break; default: errno = ENOSYS; return -1; } /* Some error happened. Errno should be set by the previous call */ if(list_size < 0) return -1; /* No attributes */ if(list_size == 0) continue; /* XXX: Call with an empty buffer may be used to calculate necessary buffer size. Unfortunately, we can't say how many attributes were returned, so here is the potential problem with the emulation. */ if(list == NULL) { /* Take the worst case of one char attribute names - two bytes per name plus one more for sanity. */ total_size += list_size + (list_size/2 + 1)*extattr[t].len; continue; } /* Count necessary offset to fit namespace prefixes */ len = 0; for(i = 0; i < list_size; i += list[i] + 1) len += extattr[t].len; total_size += list_size + len; /* Buffer is too small to fit the results */ if(total_size > size) { errno = ERANGE; return -1; } /* Shift results back, so we can prepend prefixes */ buf = (char *)memmove(list + len, list, list_size); for(i = 0; i < list_size; i += len + 1) { len = buf[i]; strncpy(list, extattr[t].name, extattr[t].len + 1); list += extattr[t].len; strncpy(list, buf + i + 1, len); list[len] = '\0'; list += len + 1; } size -= total_size; } return total_size; } #endif #if defined(HAVE_XATTR_ATTR) && (defined(HAVE_SYS_ATTRIBUTES_H) || defined(HAVE_ATTR_ATTRIBUTES_H)) static char attr_buffer[ATTR_MAX_VALUELEN]; static ssize_t irix_attr_list(const char *path, int filedes, char *list, size_t size, int flags) { int retval = 0, index; attrlist_cursor_t *cursor = 0; int total_size = 0; attrlist_t * al = (attrlist_t *)attr_buffer; attrlist_ent_t *ae; size_t ent_size, left = size; char *bp = list; while (true) { if (filedes) retval = attr_listf(filedes, attr_buffer, ATTR_MAX_VALUELEN, flags, cursor); else retval = attr_list(path, attr_buffer, ATTR_MAX_VALUELEN, flags, cursor); if (retval) break; for (index = 0; index < al->al_count; index++) { ae = ATTR_ENTRY(attr_buffer, index); ent_size = strlen(ae->a_name) + sizeof("user."); if (left >= ent_size) { strncpy(bp, "user.", sizeof("user.")); strncat(bp, ae->a_name, ent_size - sizeof("user.")); bp += ent_size; left -= ent_size; } else if (size) { errno = ERANGE; retval = -1; break; } total_size += ent_size; } if (al->al_more == 0) break; } if (retval == 0) { flags |= ATTR_ROOT; cursor = 0; while (true) { if (filedes) retval = 
attr_listf(filedes, attr_buffer, ATTR_MAX_VALUELEN, flags, cursor); else retval = attr_list(path, attr_buffer, ATTR_MAX_VALUELEN, flags, cursor); if (retval) break; for (index = 0; index < al->al_count; index++) { ae = ATTR_ENTRY(attr_buffer, index); ent_size = strlen(ae->a_name) + sizeof("system."); if (left >= ent_size) { strncpy(bp, "system.", sizeof("system.")); strncat(bp, ae->a_name, ent_size - sizeof("system.")); bp += ent_size; left -= ent_size; } else if (size) { errno = ERANGE; retval = -1; break; } total_size += ent_size; } if (al->al_more == 0) break; } } return (ssize_t)(retval ? retval : total_size); } #endif ssize_t rep_listxattr (const char *path, char *list, size_t size) { #if defined(HAVE_XATTR_XATTR) #ifndef XATTR_ADDITIONAL_OPTIONS return listxattr(path, list, size); #else /* So that we do not recursively call this function */ #undef listxattr int options = 0; return listxattr(path, list, size, options); #endif #elif defined(HAVE_XATTR_EA) return listea(path, list, size); #elif defined(HAVE_XATTR_EXTATTR) extattr_arg arg; arg.path = path; return bsd_attr_list(0, arg, list, size); #elif defined(HAVE_XATTR_ATTR) && defined(HAVE_SYS_ATTRIBUTES_H) return irix_attr_list(path, 0, list, size, 0); #elif defined(HAVE_ATTROPEN) ssize_t ret = -1; int attrdirfd = solaris_attropen(path, ".", O_RDONLY, 0); if (attrdirfd >= 0) { ret = solaris_list_xattr(attrdirfd, list, size); close(attrdirfd); } return ret; #else errno = ENOSYS; return -1; #endif } ssize_t rep_flistxattr (int filedes, char *list, size_t size) { #if defined(HAVE_XATTR_XATTR) #ifndef XATTR_ADDITIONAL_OPTIONS return flistxattr(filedes, list, size); #else /* So that we do not recursively call this function */ #undef flistxattr int options = 0; return flistxattr(filedes, list, size, options); #endif #elif defined(HAVE_XATTR_EA) return flistea(filedes, list, size); #elif defined(HAVE_XATTR_EXTATTR) extattr_arg arg; arg.filedes = filedes; return bsd_attr_list(2, arg, list, size); #elif defined(HAVE_XATTR_ATTR) return irix_attr_list(NULL, filedes, list, size, 0); #elif defined(HAVE_ATTROPEN) ssize_t ret = -1; int attrdirfd = solaris_openat(filedes, ".", O_RDONLY|O_XATTR, 0); if (attrdirfd >= 0) { ret = solaris_list_xattr(attrdirfd, list, size); close(attrdirfd); } return ret; #else errno = ENOSYS; return -1; #endif } int rep_removexattr (const char *path, const char *name) { #if defined(HAVE_XATTR_XATTR) #ifndef XATTR_ADDITIONAL_OPTIONS return removexattr(path, name); #else /* So that we do not recursively call this function */ #undef removexattr int options = 0; return removexattr(path, name, options); #endif #elif defined(HAVE_XATTR_EA) return removeea(path, name); #elif defined(HAVE_XATTR_EXTATTR) int attrnamespace; const char *attrname; if (strncmp(name, "system.", 7) == 0) { attrnamespace = EXTATTR_NAMESPACE_SYSTEM; attrname = name + 7; } else if (strncmp(name, "user.", 5) == 0) { attrnamespace = EXTATTR_NAMESPACE_USER; attrname = name + 5; } else { errno = EINVAL; return -1; } return extattr_delete_file(path, attrnamespace, attrname); #elif defined(HAVE_XATTR_ATTR) int flags = 0; char *attrname = strchr(name,'.') + 1; if (strncmp(name, "system", 6) == 0) flags |= ATTR_ROOT; return attr_remove(path, attrname, flags); #elif defined(HAVE_ATTROPEN) int ret = -1; int attrdirfd = solaris_attropen(path, ".", O_RDONLY, 0); if (attrdirfd >= 0) { ret = solaris_unlinkat(attrdirfd, name); close(attrdirfd); } return ret; #else errno = ENOSYS; return -1; #endif } int rep_fremovexattr (int filedes, const char *name) { #if 
defined(HAVE_XATTR_XATTR) #ifndef XATTR_ADDITIONAL_OPTIONS return fremovexattr(filedes, name); #else /* So that we do not recursively call this function */ #undef fremovexattr int options = 0; return fremovexattr(filedes, name, options); #endif #elif defined(HAVE_XATTR_EA) return fremoveea(filedes, name); #elif defined(HAVE_XATTR_EXTATTR) int attrnamespace; const char *attrname; if (strncmp(name, "system.", 7) == 0) { attrnamespace = EXTATTR_NAMESPACE_SYSTEM; attrname = name + 7; } else if (strncmp(name, "user.", 5) == 0) { attrnamespace = EXTATTR_NAMESPACE_USER; attrname = name + 5; } else { errno = EINVAL; return -1; } return extattr_delete_fd(filedes, attrnamespace, attrname); #elif defined(HAVE_XATTR_ATTR) int flags = 0; char *attrname = strchr(name,'.') + 1; if (strncmp(name, "system", 6) == 0) flags |= ATTR_ROOT; return attr_removef(filedes, attrname, flags); #elif defined(HAVE_ATTROPEN) int ret = -1; int attrdirfd = solaris_openat(filedes, ".", O_RDONLY|O_XATTR, 0); if (attrdirfd >= 0) { ret = solaris_unlinkat(attrdirfd, name); close(attrdirfd); } return ret; #else errno = ENOSYS; return -1; #endif } int rep_setxattr (const char *path, const char *name, const void *value, size_t size, int flags) { #if defined(HAVE_XATTR_XATTR) #ifndef XATTR_ADDITIONAL_OPTIONS return setxattr(path, name, value, size, flags); #else /* So that we do not recursively call this function */ #undef setxattr int options = 0; return setxattr(path, name, value, size, 0, options); #endif #elif defined(HAVE_XATTR_EA) return setea(path, name, value, size, flags); #elif defined(HAVE_XATTR_EXTATTR) int retval = 0; int attrnamespace; const char *attrname; if (strncmp(name, "system.", 7) == 0) { attrnamespace = EXTATTR_NAMESPACE_SYSTEM; attrname = name + 7; } else if (strncmp(name, "user.", 5) == 0) { attrnamespace = EXTATTR_NAMESPACE_USER; attrname = name + 5; } else { errno = EINVAL; return -1; } if (flags) { /* Check attribute existence */ retval = extattr_get_file(path, attrnamespace, attrname, NULL, 0); if (retval < 0) { /* REPLACE an attribute that doesn't exist */ if (flags & XATTR_REPLACE && errno == ENOATTR) { errno = ENOATTR; return -1; } /* Ignore other errors */ } else { /* CREATE an attribute that already exists */ if (flags & XATTR_CREATE) { errno = EEXIST; return -1; } } } retval = extattr_set_file(path, attrnamespace, attrname, value, size); return (retval < 0) ? 
-1 : 0; #elif defined(HAVE_XATTR_ATTR) int myflags = 0; char *attrname = strchr(name,'.') + 1; if (strncmp(name, "system", 6) == 0) myflags |= ATTR_ROOT; if (flags & XATTR_CREATE) myflags |= ATTR_CREATE; if (flags & XATTR_REPLACE) myflags |= ATTR_REPLACE; return attr_set(path, attrname, (const char *)value, size, myflags); #elif defined(HAVE_ATTROPEN) int ret = -1; int myflags = O_RDWR; int attrfd; if (flags & XATTR_CREATE) myflags |= O_EXCL; if (!(flags & XATTR_REPLACE)) myflags |= O_CREAT; attrfd = solaris_attropen(path, name, myflags, (mode_t) SOLARIS_ATTRMODE); if (attrfd >= 0) { ret = solaris_write_xattr(attrfd, value, size); close(attrfd); } return ret; #else errno = ENOSYS; return -1; #endif } int rep_fsetxattr (int filedes, const char *name, const void *value, size_t size, int flags) { #if defined(HAVE_XATTR_XATTR) #ifndef XATTR_ADDITIONAL_OPTIONS return fsetxattr(filedes, name, value, size, flags); #else /* So that we do not recursively call this function */ #undef fsetxattr int options = 0; return fsetxattr(filedes, name, value, size, 0, options); #endif #elif defined(HAVE_XATTR_EA) return fsetea(filedes, name, value, size, flags); #elif defined(HAVE_XATTR_EXTATTR) int retval = 0; int attrnamespace; const char *attrname; if (strncmp(name, "system.", 7) == 0) { attrnamespace = EXTATTR_NAMESPACE_SYSTEM; attrname = name + 7; } else if (strncmp(name, "user.", 5) == 0) { attrnamespace = EXTATTR_NAMESPACE_USER; attrname = name + 5; } else { errno = EINVAL; return -1; } if (flags) { /* Check attribute existence */ retval = extattr_get_fd(filedes, attrnamespace, attrname, NULL, 0); if (retval < 0) { /* REPLACE an attribute that doesn't exist */ if (flags & XATTR_REPLACE && errno == ENOATTR) { errno = ENOATTR; return -1; } /* Ignore other errors */ } else { /* CREATE an attribute that already exists */ if (flags & XATTR_CREATE) { errno = EEXIST; return -1; } } } retval = extattr_set_fd(filedes, attrnamespace, attrname, value, size); return (retval < 0) ? 
-1 : 0; #elif defined(HAVE_XATTR_ATTR) int myflags = 0; char *attrname = strchr(name,'.') + 1; if (strncmp(name, "system", 6) == 0) myflags |= ATTR_ROOT; if (flags & XATTR_CREATE) myflags |= ATTR_CREATE; if (flags & XATTR_REPLACE) myflags |= ATTR_REPLACE; return attr_setf(filedes, attrname, (const char *)value, size, myflags); #elif defined(HAVE_ATTROPEN) int ret = -1; int myflags = O_RDWR | O_XATTR; int attrfd; if (flags & XATTR_CREATE) myflags |= O_EXCL; if (!(flags & XATTR_REPLACE)) myflags |= O_CREAT; attrfd = solaris_openat(filedes, name, myflags, (mode_t) SOLARIS_ATTRMODE); if (attrfd >= 0) { ret = solaris_write_xattr(attrfd, value, size); close(attrfd); } return ret; #else errno = ENOSYS; return -1; #endif } /************************************************************************** helper functions for Solaris' EA support ****************************************************************************/ #ifdef HAVE_ATTROPEN static ssize_t solaris_read_xattr(int attrfd, void *value, size_t size) { struct stat sbuf; if (fstat(attrfd, &sbuf) == -1) { errno = ENOATTR; return -1; } /* This is to return the current size of the named extended attribute */ if (size == 0) { return sbuf.st_size; } /* check size and read xattr */ if (sbuf.st_size > size) { errno = ERANGE; return -1; } return read(attrfd, value, sbuf.st_size); } static ssize_t solaris_list_xattr(int attrdirfd, char *list, size_t size) { ssize_t len = 0; DIR *dirp; struct dirent *de; int newfd = dup(attrdirfd); /* CAUTION: The originating file descriptor should not be used again following the call to fdopendir(). For that reason we dup() the file descriptor here to make things more clear. */ dirp = fdopendir(newfd); while ((de = readdir(dirp))) { size_t listlen = strlen(de->d_name) + 1; if (!strcmp(de->d_name, ".") || !strcmp(de->d_name, "..")) { /* we don't want "." and ".." here: */ continue; } if (size == 0) { /* return the current size of the list of extended attribute names */ len += listlen; } else { /* check size and copy entries + nul into list. 
*/ if ((len + listlen) > size) { errno = ERANGE; len = -1; break; } else { strlcpy(list + len, de->d_name, listlen); len += listlen; } } } if (closedir(dirp) == -1) { return -1; } return len; } static int solaris_unlinkat(int attrdirfd, const char *name) { if (unlinkat(attrdirfd, name, 0) == -1) { if (errno == ENOENT) { errno = ENOATTR; } return -1; } return 0; } static int solaris_attropen(const char *path, const char *attrpath, int oflag, mode_t mode) { int filedes = attropen(path, attrpath, oflag, mode); if (filedes == -1) { if (errno == EINVAL) { errno = ENOTSUP; } else { errno = ENOATTR; } } return filedes; } static int solaris_openat(int fildes, const char *path, int oflag, mode_t mode) { int filedes = openat(fildes, path, oflag, mode); if (filedes == -1) { if (errno == EINVAL) { errno = ENOTSUP; } else { errno = ENOATTR; } } return filedes; } static int solaris_write_xattr(int attrfd, const char *value, size_t size) { if ((ftruncate(attrfd, 0) == 0) && (write(attrfd, value, size) == size)) { return 0; } else { return -1; } } #endif /*HAVE_ATTROPEN*/ tdb-1.4.2/buildtools/README0000660000000000000000000000056212406075657015302 0ustar rootroot00000000000000See http://code.google.com/p/waf/ for more information on waf You can get a svn copy of the upstream source with: svn checkout http://waf.googlecode.com/svn/trunk/ waf-read-only Samba currently uses waf 1.5, which can be found at: http://waf.googlecode.com/svn/branches/waf-1.5 To update the current copy of waf, use the update-waf.sh script in this directory. tdb-1.4.2/buildtools/bin/waf0000770000000000000000000001037413527011454015664 0ustar rootroot00000000000000#!/usr/bin/env python3 # encoding: latin-1 # Thomas Nagy, 2005-2018 # """ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
""" import os, sys, inspect VERSION="2.0.17" REVISION="x" GIT="x" INSTALL="x" C1='x' C2='x' C3='x' cwd = os.getcwd() join = os.path.join if sys.hexversion<0x206000f: raise ImportError('Python >= 2.6 is required to create the waf file') WAF='waf' def b(x): return x if sys.hexversion>0x300000f: WAF='waf3' def b(x): return x.encode() def err(m): print(('\033[91mError: %s\033[0m' % m)) sys.exit(1) def unpack_wafdir(dir, src): f = open(src,'rb') c = 'corrupt archive (%d)' while 1: line = f.readline() if not line: err('run waf-light from a folder containing waflib') if line == b('#==>\n'): txt = f.readline() if not txt: err(c % 1) if f.readline() != b('#<==\n'): err(c % 2) break if not txt: err(c % 3) txt = txt[1:-1].replace(b(C1), b('\n')).replace(b(C2), b('\r')).replace(b(C3), b('\x00')) import shutil, tarfile try: shutil.rmtree(dir) except OSError: pass try: for x in ('Tools', 'extras'): os.makedirs(join(dir, 'waflib', x)) except OSError: err("Cannot unpack waf lib into %s\nMove waf in a writable directory" % dir) os.chdir(dir) tmp = 't.bz2' t = open(tmp,'wb') try: t.write(txt) finally: t.close() try: t = tarfile.open(tmp) except: try: os.system('bunzip2 t.bz2') t = tarfile.open('t') tmp = 't' except: os.chdir(cwd) try: shutil.rmtree(dir) except OSError: pass err("Waf cannot be unpacked, check that bzip2 support is present") try: for x in t: t.extract(x) finally: t.close() for x in ('Tools', 'extras'): os.chmod(join('waflib',x), 493) if sys.hexversion<0x300000f: sys.path = [join(dir, 'waflib')] + sys.path import fixpy2 fixpy2.fixdir(dir) os.remove(tmp) os.chdir(cwd) try: dir = unicode(dir, 'mbcs') except: pass try: from ctypes import windll windll.kernel32.SetFileAttributesW(dir, 2) except: pass def test(dir): try: os.stat(join(dir, 'waflib')) return os.path.abspath(dir) except OSError: pass def find_lib(): path = '../../third_party/waf' paths = [path, path+'/waflib'] return [os.path.abspath(os.path.join(os.path.dirname(__file__), x)) for x in paths] wafdir = find_lib() for p in wafdir: sys.path.insert(0, p) if __name__ == '__main__': #import extras.compat15#PRELUDE import sys from waflib.Tools import ccroot, c, ar, compiler_c, gcc sys.modules['cc'] = c sys.modules['ccroot'] = ccroot sys.modules['ar'] = ar sys.modules['compiler_cc'] = compiler_c sys.modules['gcc'] = gcc from waflib import Options Options.lockfile = os.environ.get('WAFLOCK', '.lock-wscript') if os.path.isfile(Options.lockfile) and os.stat(Options.lockfile).st_size == 0: os.environ['NOCLIMB'] = "1" # there is a single top-level, but libraries must build independently os.environ['NO_LOCK_IN_TOP'] = "1" from waflib import Task class o(object): display = None Task.classes['cc_link'] = o from waflib import Scripting Scripting.waf_entry_point(cwd, VERSION, wafdir[0]) tdb-1.4.2/buildtools/compare_config_h4.sh0000770000000000000000000000050212406075657020320 0ustar rootroot00000000000000#!/bin/sh # compare the generated config.h from a waf build with existing samba # build grep "^.define" bin/default/source4/include/config.h | sort > waf-config.h grep "^.define" $HOME/samba_old/source4/include/config.h | sort > old-config.h comm -23 old-config.h waf-config.h #echo #diff -u old-config.h waf-config.h tdb-1.4.2/buildtools/compare_generated.sh0000770000000000000000000000251212406075657020421 0ustar rootroot00000000000000#!/bin/sh # compare the generated files from a waf old_build=$HOME/samba_old gen_files=$(cd bin/default && find . 
-type f -name '*.[ch]') 2>&1 strip_file() { in_file=$1 out_file=$2 cat $in_file | grep -v 'The following definitions come from' | grep -v 'Automatically generated at' | grep -v 'Generated from' | sed 's|/home/tnagy/samba/source4||g' | sed 's|/home/tnagy/samba/|../|g' | sed 's|bin/default/source4/||g' | sed 's|bin/default/|../|g' | sed 's/define _____/define ___/g' | sed 's/define __*/define _/g' | sed 's/define _DEFAULT_/define _/g' | sed 's/define _SOURCE4_/define ___/g' | sed 's/define ___/define _/g' | sed 's/ifndef ___/ifndef _/g' | sed 's|endif /* ____|endif /* __|g' | sed s/__DEFAULT_SOURCE4/__/ | sed s/__DEFAULT_SOURCE4/__/ | sed s/__DEFAULT/____/ > $out_file } compare_file() { f=$f bname=$(basename $f) t1=/tmp/$bname.old.$$ t2=/tmp/$bname.new.$$ strip_file $old_build/$f $t1 strip_file bin/default/$f $t2 diff -u -b $t1 $t2 2>&1 rm -f $t1 $t2 } for f in $gen_files; do compare_file $f done tdb-1.4.2/buildtools/compare_install.sh0000770000000000000000000000021212406075657020124 0ustar rootroot00000000000000#!/bin/sh prefix1="$1" prefix2="$2" (cd $prefix1 && find . ) | sort > p1.txt (cd $prefix2 && find . ) | sort > p2.txt diff -u p[12].txt tdb-1.4.2/buildtools/examples/run_on_target.py0000770000000000000000000001162313444661620021452 0ustar rootroot00000000000000#!/usr/bin/env python3 # # Sample run-on-target script # This is a script that can be used as the cross-execute parameter to the samba # configuration process, running the command on a remote target for which # the cross-compiled configure test was compiled. # # To use: # ./configure \ # --cross-compile \ # '--cross-execute=./buildtools/example/run_on_target.py --host=<host>' # # A more elaborate example: # ./configure \ # --cross-compile \ # '--cross-execute=./buildtools/example/run_on_target.py --host=<host> --user=<user> "--ssh=ssh -i <keyfile>" --destdir=/path/to/dir' # # Typically this is to be used also with --cross-answers, so that the # cross answers file gets built and further builds can be made without # the help of a remote target. # # The following assumptions are made: # 1. rsync is available on build machine and target machine # 2. A running ssh service on target machine with password-less shell login # 3. A directory writable by the password-less login user # 4. The tests on the target can run and provide reliable results # from the login account's home directory. This is significant # for example in locking tests which # create files in the current directory. As a workaround to this # assumption, the TESTDIR environment variable can be set on the target # (using ssh command line or server config) and the tests shall # chdir to that directory. # import sys import os import subprocess from optparse import OptionParser # those are defaults, but can be overridden using command line SSH = 'ssh' USER = None HOST = 'localhost' def xfer_files(ssh, srcdir, host, user, targ_destdir): """Transfer executable files to target Use rsync to copy the directory containing program to run INTO a destination directory on the target. An exact copy of the source directory is created on the target machine, possibly deleting files on the target machine which do not exist on the source directory. The idea is that the test may include files in addition to the compiled binary, and all of those files reside alongside the binary in a source directory. 
For example, if the test to run is /foo/bar/test and the destination directory on the target is /tbaz, then /tbaz/bar on the target shall be an exact copy of /foo/bar on the source, including deletion of files inside /tbaz/bar which do not exist on the source. """ userhost = host if user: userhost = '%s@%s' % (user, host) cmd = 'rsync --verbose -rl --ignore-times --delete -e "%s" %s %s:%s/' % \ (ssh, srcdir, userhost, targ_destdir) p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = p.communicate() if p.returncode != 0: raise Exception('failed syncing files\n stdout:\n%s\nstderr:%s\n' % (out, err)) def exec_remote(ssh, host, user, destdir, targdir, prog, args): """Run a test on the target Using password-less ssh, run the compiled binary on the target. An assumption is that there's no need to cd into the target dir, same as there's no need to do it on a native build. """ userhost = host if user: userhost = '%s@%s' % (user, host) cmd = '%s %s %s/%s/%s' % (ssh, userhost, destdir, targdir, prog) if args: cmd = cmd + ' ' + ' '.join(args) p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = p.communicate() return (p.returncode, out) def main(argv): usage = "usage: %prog [options] [args]" parser = OptionParser(usage) parser.add_option('--ssh', help="SSH client and additional flags", default=SSH) parser.add_option('--host', help="target host name or IP address", default=HOST) parser.add_option('--user', help="login user on target", default=USER) parser.add_option('--destdir', help="work directory on target", default='~') (options, args) = parser.parse_args(argv) if len(args) < 1: parser.error("please supply test program to run") progpath = args[0] # assume that a test that was not compiled fails (e.g. 
getconf) if progpath[0] != '/': return (1, "") progdir = os.path.dirname(progpath) prog = os.path.basename(progpath) targ_progdir = os.path.basename(progdir) xfer_files( options.ssh, progdir, options.host, options.user, options.destdir) (rc, out) = exec_remote(options.ssh, options.host, options.user, options.destdir, targ_progdir, prog, args[1:]) return (rc, out) if __name__ == '__main__': (rc, out) = main(sys.argv[1:]) sys.stdout.write(out) sys.exit(rc) tdb-1.4.2/buildtools/scripts/Makefile.waf0000660000000000000000000000176712406075657020325 0ustar rootroot00000000000000# simple makefile wrapper to run waf WAF_BINARY=BUILDTOOLS/bin/waf WAF=WAF_MAKE=1 $(WAF_BINARY) all: $(WAF) build install: $(WAF) install uninstall: $(WAF) uninstall test: $(WAF) test $(TEST_OPTIONS) help: @echo NOTE: to run extended waf options use $(WAF_BINARY) or modify your PATH $(WAF) --help testenv: $(WAF) test --testenv $(TEST_OPTIONS) quicktest: $(WAF) test --quick $(TEST_OPTIONS) dist: $(WAF) dist distcheck: $(WAF) distcheck clean: $(WAF) clean distclean: $(WAF) distclean reconfigure: configure $(WAF) reconfigure show_waf_options: $(WAF) --help # some compatibility make targets everything: all testsuite: all check: test torture: all # this should do an install as well, once install is finished installcheck: test etags: $(WAF) etags ctags: $(WAF) ctags bin/%:: FORCE $(WAF) --targets=$@ FORCE: configure: autogen-waf.sh BUILDTOOLS/scripts/configure.waf ./autogen-waf.sh Makefile: autogen-waf.sh configure BUILDTOOLS/scripts/Makefile.waf ./autogen-waf.sh tdb-1.4.2/buildtools/scripts/abi_gen.sh0000770000000000000000000000075613444661620020032 0ustar rootroot00000000000000#!/bin/sh # generate a set of ABI signatures from a shared library SHAREDLIB="$1" GDBSCRIPT="gdb_syms.$$" ( cat <<EOF set height 0 set width 0 EOF # emit one gdb command per exported symbol; gdb then prints "name:" followed # by the symbol's value/signature, which samba_abi.py parses objdump --dynamic-syms "${SHAREDLIB}" | awk '$0 !~ /.hidden/ {if ($2 == "g" && $3 ~ /D(F|O)/ && ($4 ~ /(.bss|.rodata|.text)/)) print $NF}' | sort | while read s; do echo "echo $s: " echo "p ${s}" done ) > $GDBSCRIPT # forcing the terminal avoids a problem on Fedora12 TERM=none gdb -n -batch -x $GDBSCRIPT "$SHAREDLIB" < /dev/null rm -f $GDBSCRIPT tdb-1.4.2/buildtools/scripts/autogen-waf.sh0000770000000000000000000000135212406075657020662 0ustar rootroot00000000000000#!/bin/sh p=`dirname $0` echo "Setting up for waf build" echo "Looking for the buildtools directory" d="buildtools" while test \! -d "$p/$d"; do d="../$d"; done echo "Found buildtools in $p/$d" echo "Setting up configure" rm -f $p/configure $p/include/config*.h* sed "s|BUILDTOOLS|$d|g;s|BUILDPATH|$p|g" < "$p/$d/scripts/configure.waf" > $p/configure chmod +x $p/configure echo "Setting up Makefile" rm -f $p/makefile $p/Makefile sed "s|BUILDTOOLS|$d|g" < "$p/$d/scripts/Makefile.waf" > $p/Makefile echo "done. Now run $p/configure or $p/configure.developer then make." if [ $p != "." ]; then echo "Notice: The build invoke path is not 'source4'! Use make with the parameter" echo "-C <'source4' path>. Example: make -C source4 all" fi tdb-1.4.2/buildtools/scripts/configure.waf0000770000000000000000000000037112406075657020571 0ustar rootroot00000000000000#!/bin/sh PREVPATH=`dirname $0` WAF=BUILDTOOLS/bin/waf # using JOBS=1 gives maximum compatibility with # systems like AIX which have broken threading in python JOBS=1 export JOBS cd BUILDPATH || exit 1 $WAF configure "$@" || exit 1 cd $PREVPATH tdb-1.4.2/buildtools/testwaf.sh0000770000000000000000000000260012406075657016430 0ustar rootroot00000000000000#!/bin/bash set -e set -x d=$(dirname $0) cd $d/.. 
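# Usage sketch (assumes a full Samba-style checkout): run with no arguments
# to exercise all five bundled libraries, or name specific ones instead, e.g.
#   buildtools/testwaf.sh lib/tdb lib/talloc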
PREFIX=$HOME/testprefix if [ $# -gt 0 ]; then tests="$*" else tests="lib/replace lib/talloc lib/tevent lib/tdb lib/ldb" fi echo "testing in dirs $tests" for d in $tests; do echo "`date`: testing $d" pushd $d rm -rf bin type waf waf dist ./configure -C --enable-developer --prefix=$PREFIX time make make install make distcheck case $d in "lib/ldb") ldd bin/ldbadd ;; "lib/replace") ldd bin/replace_testsuite ;; "lib/talloc") ldd bin/talloc_testsuite ;; "lib/tdb") ldd bin/tdbtool ;; esac popd done echo "testing python portability" pushd lib/talloc versions="python2.4 python2.5 python2.6 python3.0 python3.1" for p in $versions; do ret=$(which $p || echo "failed") if [ $ret = "failed" ]; then echo "$p not found, skipping" continue fi echo "Testing $p" $p ../../buildtools/bin/waf configure -C --enable-developer --prefix=$PREFIX $p ../../buildtools/bin/waf build install done popd echo "testing cross compiling" pushd lib/talloc ret=$(which arm-linux-gnueabi-gcc || echo "failed") if [ $ret != "failed" ]; then CC=arm-linux-gnueabi-gcc ./configure -C --prefix=$PREFIX --cross-compile --cross-execute='runarm' make && make install else echo "Cross-compiler not installed, skipping test" fi popd tdb-1.4.2/buildtools/wafsamba/README0000660000000000000000000000037712406075657015302 0ustar rootroot00000000000000This is a set of waf 'tools' to help make building the Samba components easier, by having common functions in one place. This gives us a more consistent build, and ensures that our project rules are obeyed tdb-1.4.2/buildtools/wafsamba/__init__.py0000660000000000000000000000000012406075657020277 0ustar rootroot00000000000000tdb-1.4.2/buildtools/wafsamba/configure_file.py0000660000000000000000000000233113444661620021523 0ustar rootroot00000000000000# handle substitution of variables in .in files import sys import re import os from waflib import Build, Logs from samba_utils import SUBST_VARS_RECURSIVE def subst_at_vars(task): '''substitute @VAR@ style variables in a file''' env = task.env s = task.inputs[0].read() # split on the vars a = re.split('(@\w+@)', s) out = [] for v in a: if re.match('@\w+@', v): vname = v[1:-1] if not vname in task.env and vname.upper() in task.env: vname = vname.upper() if not vname in task.env: Logs.error("Unknown substitution %s in %s" % (v, task.name)) sys.exit(1) v = SUBST_VARS_RECURSIVE(task.env[vname], task.env) out.append(v) contents = ''.join(out) task.outputs[0].write(contents) return 0 def CONFIGURE_FILE(bld, in_file, **kwargs): '''configure file''' base=os.path.basename(in_file) t = bld.SAMBA_GENERATOR('INFILE_%s' % base, rule = subst_at_vars, source = in_file + '.in', target = in_file, vars = kwargs) Build.BuildContext.CONFIGURE_FILE = CONFIGURE_FILE tdb-1.4.2/buildtools/wafsamba/generic_cc.py0000660000000000000000000000324413444661620020630 0ustar rootroot00000000000000 # compiler definition for a generic C compiler # based on suncc.py from waf import os, optparse from waflib import Errors from waflib.Tools import ccroot, ar from waflib.Configure import conf # # Let waflib provide useful defaults, but # provide generic_cc as last resort fallback on # all platforms # from waflib.Tools.compiler_c import c_compiler for key in c_compiler.keys(): c_compiler[key].append('generic_cc') @conf def find_generic_cc(conf): v = conf.env cc = None if v.CC: cc = v.CC elif 'CC' in conf.environ: cc = conf.environ['CC'] if not cc: cc = conf.find_program('cc', var='CC') if not cc: conf.fatal('generic_cc was not found')
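# Smoke-test the compiler before accepting it: '--version' is a cheap probe
# that nearly every C compiler understands, and a compiler that cannot even
# report its version is not usable for the configure tests that follow.
try: 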
conf.cmd_and_log(cc + ['--version']) except Errors.WafError: conf.fatal('%r --version could not be executed' % cc) v.CC = cc v.CC_NAME = 'generic_cc' @conf def generic_cc_common_flags(conf): v = conf.env v.CC_SRC_F = '' v.CC_TGT_F = ['-c', '-o'] v.CPPPATH_ST = '-I%s' v.DEFINES_ST = '-D%s' if not v.LINK_CC: v.LINK_CC = v.CC v.CCLNK_SRC_F = '' v.CCLNK_TGT_F = ['-o'] v.LIB_ST = '-l%s' # template for adding libs v.LIBPATH_ST = '-L%s' # template for adding libpaths v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-L%s' v.cprogram_PATTERN = '%s' v.cshlib_PATTERN = 'lib%s.so' v.cstlib_PATTERN = 'lib%s.a' def configure(conf): conf.find_generic_cc() conf.find_ar() conf.generic_cc_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() tdb-1.4.2/buildtools/wafsamba/pkgconfig.py0000660000000000000000000000457613444661620020525 0ustar rootroot00000000000000# handle substitution of variables in pc files import os, re, sys from waflib import Build, Logs from samba_utils import SUBST_VARS_RECURSIVE, TO_LIST def subst_at_vars(task): '''substitute @VAR@ style variables in a file''' s = task.inputs[0].read() # split on the vars a = re.split('(@\w+@)', s) out = [] done_var = {} back_sub = [ ('PREFIX', '${prefix}'), ('EXEC_PREFIX', '${exec_prefix}')] for v in a: if re.match('@\w+@', v): vname = v[1:-1] if not vname in task.env and vname.upper() in task.env: vname = vname.upper() if not vname in task.env: Logs.error("Unknown substitution %s in %s" % (v, task.name)) sys.exit(1) v = SUBST_VARS_RECURSIVE(task.env[vname], task.env) # now we back substitute the allowed pc vars for (b, m) in back_sub: s = task.env[b] if s == v[0:len(s)]: if not b in done_var: # we don't want to substitute the first usage done_var[b] = True else: v = m + v[len(s):] break out.append(v) contents = ''.join(out) task.outputs[0].write(contents) return 0 def PKG_CONFIG_FILES(bld, pc_files, vnum=None, extra_name=None): '''install some pkg_config pc files''' dest = '${PKGCONFIGDIR}' dest = bld.EXPAND_VARIABLES(dest) for f in TO_LIST(pc_files): if extra_name: target = f.split('.pc')[0] + extra_name + ".pc" else: target = f base=os.path.basename(target) t = bld.SAMBA_GENERATOR('PKGCONFIG_%s' % base, rule=subst_at_vars, source=f+'.in', target=target) bld.add_manual_dependency(bld.path.find_or_declare(f), bld.env['PREFIX'].encode('utf8')) t.vars = [] if t.env.RPATH_ON_INSTALL: t.env.LIB_RPATH = t.env.RPATH_ST % t.env.LIBDIR else: t.env.LIB_RPATH = '' if vnum: t.env.PACKAGE_VERSION = vnum for v in [ 'PREFIX', 'EXEC_PREFIX', 'LIB_RPATH' ]: t.vars.append(t.env[v]) bld.INSTALL_FILES(dest, target, flat=True, destname=base) Build.BuildContext.PKG_CONFIG_FILES = PKG_CONFIG_FILES tdb-1.4.2/buildtools/wafsamba/samba3.py0000660000000000000000000000772013444661620017720 0ustar rootroot00000000000000# a waf tool to add autoconf-like macros to the configure section # and for SAMBA_ macros for building libraries, binaries etc import os from waflib import Build from samba_utils import os_path_relpath, TO_LIST from samba_autoconf import library_flags def SAMBA3_IS_STATIC_MODULE(bld, module): '''Check whether module is in static list''' if module in bld.env['static_modules']: return True return False Build.BuildContext.SAMBA3_IS_STATIC_MODULE = SAMBA3_IS_STATIC_MODULE def SAMBA3_IS_SHARED_MODULE(bld, module): '''Check whether module is in shared list''' if module in bld.env['shared_modules']: return True return False Build.BuildContext.SAMBA3_IS_SHARED_MODULE = SAMBA3_IS_SHARED_MODULE
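# Illustration with hypothetical module names: if bld.env['static_modules']
# contains 'vfs_default' and bld.env['shared_modules'] contains 'vfs_foo',
# then both names count as enabled for the helper below.
def SAMBA3_IS_ENABLED_MODULE(bld, module): '''Check whether module 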
is in either shared or static list ''' return SAMBA3_IS_STATIC_MODULE(bld, module) or SAMBA3_IS_SHARED_MODULE(bld, module) Build.BuildContext.SAMBA3_IS_ENABLED_MODULE = SAMBA3_IS_ENABLED_MODULE def s3_fix_kwargs(bld, kwargs): '''fix the build arguments for s3 build rules to include the necessary includes, subdir and cflags options ''' s3dir = os.path.join(bld.env.srcdir, 'source3') s3reldir = os_path_relpath(s3dir, bld.path.abspath()) # the extra_includes list is relative to the source3 directory extra_includes = [ '.', 'include', 'lib' ] # local heimdal paths only included when USING_SYSTEM_KRB5 is not set if not bld.CONFIG_SET("USING_SYSTEM_KRB5"): extra_includes += [ '../source4/heimdal/lib/com_err', '../source4/heimdal/lib/krb5', '../source4/heimdal/lib/gssapi', '../source4/heimdal_build', '../bin/default/source4/heimdal/lib/asn1' ] if bld.CONFIG_SET('USING_SYSTEM_TDB'): (tdb_includes, tdb_ldflags, tdb_cpppath) = library_flags(bld, 'tdb') extra_includes += tdb_cpppath else: extra_includes += [ '../lib/tdb/include' ] if bld.CONFIG_SET('USING_SYSTEM_TEVENT'): (tevent_includes, tevent_ldflags, tevent_cpppath) = library_flags(bld, 'tevent') extra_includes += tevent_cpppath else: extra_includes += [ '../lib/tevent' ] if bld.CONFIG_SET('USING_SYSTEM_TALLOC'): (talloc_includes, talloc_ldflags, talloc_cpppath) = library_flags(bld, 'talloc') extra_includes += talloc_cpppath else: extra_includes += [ '../lib/talloc' ] if bld.CONFIG_SET('USING_SYSTEM_POPT'): (popt_includes, popt_ldflags, popt_cpppath) = library_flags(bld, 'popt') extra_includes += popt_cpppath else: extra_includes += [ '../lib/popt' ] # s3 builds assume that they will have a bunch of extra include paths includes = [] for d in extra_includes: includes += [ os.path.join(s3reldir, d) ] # the rule may already have some includes listed if 'includes' in kwargs: includes += TO_LIST(kwargs['includes']) kwargs['includes'] = includes # these wrappers allow for mixing of S3 and S4 build rules in the one build def SAMBA3_LIBRARY(bld, name, *args, **kwargs): s3_fix_kwargs(bld, kwargs) return bld.SAMBA_LIBRARY(name, *args, **kwargs) Build.BuildContext.SAMBA3_LIBRARY = SAMBA3_LIBRARY def SAMBA3_MODULE(bld, name, *args, **kwargs): s3_fix_kwargs(bld, kwargs) return bld.SAMBA_MODULE(name, *args, **kwargs) Build.BuildContext.SAMBA3_MODULE = SAMBA3_MODULE def SAMBA3_SUBSYSTEM(bld, name, *args, **kwargs): s3_fix_kwargs(bld, kwargs) return bld.SAMBA_SUBSYSTEM(name, *args, **kwargs) Build.BuildContext.SAMBA3_SUBSYSTEM = SAMBA3_SUBSYSTEM def SAMBA3_BINARY(bld, name, *args, **kwargs): s3_fix_kwargs(bld, kwargs) return bld.SAMBA_BINARY(name, *args, **kwargs) Build.BuildContext.SAMBA3_BINARY = SAMBA3_BINARY def SAMBA3_PYTHON(bld, name, *args, **kwargs): s3_fix_kwargs(bld, kwargs) return bld.SAMBA_PYTHON(name, *args, **kwargs) Build.BuildContext.SAMBA3_PYTHON = SAMBA3_PYTHON tdb-1.4.2/buildtools/wafsamba/samba_abi.py0000660000000000000000000002153613444661620020451 0ustar rootroot00000000000000# functions for handling ABI checking of libraries import os import sys import re import fnmatch from waflib import Options, Utils, Logs, Task, Build, Errors from waflib.TaskGen import feature, before, after from wafsamba import samba_utils # these type maps cope with platform specific names for common types # please add new type mappings into the list below abi_type_maps = { '_Bool' : 'bool', 'struct __va_list_tag *' : 'va_list' } version_key = lambda x: list(map(int, x.split("."))) def normalise_signature(sig): '''normalise a signature from gdb''' sig = sig.strip() 
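    # An indicative example (gdb output varies between versions and
    # platforms): a raw value such as
    #   $1 = {int (struct tdb_context *)} 0x89a0 <tdb_close>
    # is reduced by the rules below to
    #   int (struct tdb_context *)
    # with all addresses collapsed to 0xXXXX, so that signatures from
    # different runs remain comparable.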
sig = re.sub('^\$[0-9]+\s=\s\{(.+)\}$', r'\1', sig) sig = re.sub('^\$[0-9]+\s=\s\{(.+)\}(\s0x[0-9a-f]+\s<\w+>)+$', r'\1', sig) sig = re.sub('^\$[0-9]+\s=\s(0x[0-9a-f]+)\s?(<\w+>)?$', r'\1', sig) sig = re.sub('0x[0-9a-f]+', '0xXXXX', sig) sig = re.sub('", ', r'\1"', sig) for t in abi_type_maps: # we need to cope with non-word characters in mapped types m = t m = m.replace('*', '\*') if m[-1].isalnum() or m[-1] == '_': m += '\\b' if m[0].isalnum() or m[0] == '_': m = '\\b' + m sig = re.sub(m, abi_type_maps[t], sig) return sig def normalise_varargs(sig): '''cope with older versions of gdb''' sig = re.sub(',\s\.\.\.', '', sig) return sig def parse_sigs(sigs, abi_match): '''parse ABI signatures file''' abi_match = samba_utils.TO_LIST(abi_match) ret = {} a = sigs.split('\n') for s in a: if s.find(':') == -1: continue sa = s.split(':') if abi_match: matched = False negative = False for p in abi_match: if p[0] == '!' and fnmatch.fnmatch(sa[0], p[1:]): negative = True break elif fnmatch.fnmatch(sa[0], p): matched = True break if (not matched) and negative: continue Logs.debug("%s -> %s" % (sa[1], normalise_signature(sa[1]))) ret[sa[0]] = normalise_signature(sa[1]) return ret def save_sigs(sig_file, parsed_sigs): '''save ABI signatures to a file''' sigs = '' for s in sorted(parsed_sigs.keys()): sigs += '%s: %s\n' % (s, parsed_sigs[s]) return samba_utils.save_file(sig_file, sigs, create_dir=True) def abi_check_task(self): '''check if the ABI has changed''' abi_gen = self.ABI_GEN libpath = self.inputs[0].abspath(self.env) libname = os.path.basename(libpath) sigs = samba_utils.get_string(Utils.cmd_output([abi_gen, libpath])) parsed_sigs = parse_sigs(sigs, self.ABI_MATCH) sig_file = self.ABI_FILE old_sigs = samba_utils.load_file(sig_file) if old_sigs is None or Options.options.ABI_UPDATE: if not save_sigs(sig_file, parsed_sigs): raise Errors.WafError('Failed to save ABI file "%s"' % sig_file) Logs.warn('Generated ABI signatures %s' % sig_file) return parsed_old_sigs = parse_sigs(old_sigs, self.ABI_MATCH) # check all old sigs got_error = False for s in parsed_old_sigs: if not s in parsed_sigs: Logs.error('%s: symbol %s has been removed - please update major version\n\tsignature: %s' % ( libname, s, parsed_old_sigs[s])) got_error = True elif normalise_varargs(parsed_old_sigs[s]) != normalise_varargs(parsed_sigs[s]): Logs.error('%s: symbol %s has changed - please update major version\n\told_signature: %s\n\tnew_signature: %s' % ( libname, s, parsed_old_sigs[s], parsed_sigs[s])) got_error = True for s in parsed_sigs: if not s in parsed_old_sigs: Logs.error('%s: symbol %s has been added - please mark it _PRIVATE_ or update minor version\n\tsignature: %s' % ( libname, s, parsed_sigs[s])) got_error = True if got_error: raise Errors.WafError('ABI for %s has changed - please fix library version then build with --abi-update\nSee http://wiki.samba.org/index.php/Waf#ABI_Checking for more information\nIf you have not changed any ABI, and your platform always gives this error, please configure with --abi-check-disable to skip this check' % libname) t = Task.task_factory('abi_check', abi_check_task, color='BLUE', ext_in='.bin') t.quiet = True # allow "waf --abi-check" to force re-checking the ABI if '--abi-check' in sys.argv: t.always_run = True @after('apply_link') @feature('abi_check') def abi_check(self): '''check that ABI matches saved signatures''' env = self.bld.env if not env.ABI_CHECK or self.abi_directory is None: return # if the platform doesn't support -fvisibility=hidden then the ABI # checks become fairly 
meaningless if not env.HAVE_VISIBILITY_ATTR: return topsrc = self.bld.srcnode.abspath() abi_gen = os.path.join(topsrc, 'buildtools/scripts/abi_gen.sh') abi_file = "%s/%s-%s.sigs" % (self.abi_directory, self.version_libname, self.vnum) tsk = self.create_task('abi_check', self.link_task.outputs[0]) tsk.ABI_FILE = abi_file tsk.ABI_MATCH = self.abi_match tsk.ABI_GEN = abi_gen def abi_process_file(fname, version, symmap): '''process one ABI file, adding new symbols to the symmap''' for line in Utils.readf(fname).splitlines(): symname = line.split(":")[0] if not symname in symmap: symmap[symname] = version def abi_write_vscript(f, libname, current_version, versions, symmap, abi_match): """Write a vscript file for a library in --version-script format. :param f: File-like object to write to :param libname: Name of the library, uppercased :param current_version: Current version :param versions: Versions to consider :param symmap: Dictionary mapping symbols -> version :param abi_match: List of symbols considered to be public in the current version """ invmap = {} for s in symmap: invmap.setdefault(symmap[s], []).append(s) last_key = "" versions = sorted(versions, key=version_key) for k in versions: symver = "%s_%s" % (libname, k) if symver == current_version: break f.write("%s {\n" % symver) if k in sorted(invmap.keys()): f.write("\tglobal:\n") for s in invmap.get(k, []): f.write("\t\t%s;\n" % s) f.write("}%s;\n\n" % last_key) last_key = " %s" % symver f.write("%s {\n" % current_version) local_abi = list(filter(lambda x: x[0] == '!', abi_match)) global_abi = list(filter(lambda x: x[0] != '!', abi_match)) f.write("\tglobal:\n") if len(global_abi) > 0: for x in global_abi: f.write("\t\t%s;\n" % x) else: f.write("\t\t*;\n") # Always hide symbols that must be local, if they exist local_abi.extend(["!_end", "!__bss_start", "!_edata"]) f.write("\tlocal:\n") for x in local_abi: f.write("\t\t%s;\n" % x[1:]) if global_abi != ["*"]: if len(global_abi) > 0: f.write("\t\t*;\n") f.write("};\n") def abi_build_vscript(task): '''generate a vscript file for our public libraries''' tgt = task.outputs[0].bldpath(task.env) symmap = {} versions = [] for f in task.inputs: fname = f.abspath(task.env) basename = os.path.basename(fname) version = basename[len(task.env.LIBNAME)+1:-len(".sigs")] versions.append(version) abi_process_file(fname, version, symmap) f = open(tgt, mode='w') try: abi_write_vscript(f, task.env.LIBNAME, task.env.VERSION, versions, symmap, task.env.ABI_MATCH) finally: f.close() def ABI_VSCRIPT(bld, libname, abi_directory, version, vscript, abi_match=None): '''generate a vscript file for our public libraries''' if abi_directory: source = bld.path.ant_glob('%s/%s-[0-9]*.sigs' % (abi_directory, libname), flat=True) def abi_file_key(path): return version_key(path[:-len(".sigs")].rsplit("-")[-1]) source = sorted(source.split(), key=abi_file_key) else: source = '' libname = os.path.basename(libname) version = os.path.basename(version) libname = libname.replace("-", "_").replace("+","_").upper() version = version.replace("-", "_").replace("+","_").upper() t = bld.SAMBA_GENERATOR(vscript, rule=abi_build_vscript, source=source, group='vscripts', target=vscript) if abi_match is None: abi_match = ["*"] else: abi_match = samba_utils.TO_LIST(abi_match) t.env.ABI_MATCH = abi_match t.env.VERSION = version t.env.LIBNAME = libname t.vars = ['LIBNAME', 'VERSION', 'ABI_MATCH'] Build.BuildContext.ABI_VSCRIPT = ABI_VSCRIPT tdb-1.4.2/buildtools/wafsamba/samba_autoconf.py0000660000000000000000000010024113527011454021521 0ustar 
rootroot00000000000000# a waf tool to add autoconf-like macros to the configure section import os, sys from waflib import Build, Options, Logs, Context from waflib.Configure import conf from waflib.TaskGen import feature from waflib.Tools import c_preproc as preproc from samba_utils import TO_LIST, GET_TARGET_TYPE, SET_TARGET_TYPE, unique_list, mkdir_p missing_headers = set() #################################################### # some autoconf-like helpers, to make the transition # to waf a bit easier for those used to autoconf # m4 files @conf def DEFINE(conf, d, v, add_to_cflags=False, quote=False): '''define a config option''' conf.define(d, v, quote=quote) if add_to_cflags: conf.env.append_value('CFLAGS', '-D%s=%s' % (d, str(v))) def hlist_to_string(conf, headers=None): '''convert a headers list to a set of #include lines''' hdrs='' hlist = conf.env.hlist if headers: hlist = hlist[:] hlist.extend(TO_LIST(headers)) for h in hlist: hdrs += '#include <%s>\n' % h return hdrs @conf def COMPOUND_START(conf, msg): '''start a compound test''' def null_check_message_1(self,*k,**kw): return def null_check_message_2(self,*k,**kw): return v = getattr(conf.env, 'in_compound', []) if v != [] and v != 0: conf.env.in_compound = v + 1 return conf.start_msg(msg) conf.saved_check_message_1 = conf.start_msg conf.start_msg = null_check_message_1 conf.saved_check_message_2 = conf.end_msg conf.end_msg = null_check_message_2 conf.env.in_compound = 1 @conf def COMPOUND_END(conf, result): '''end a compound test''' conf.env.in_compound -= 1 if conf.env.in_compound != 0: return conf.start_msg = conf.saved_check_message_1 conf.end_msg = conf.saved_check_message_2 p = conf.end_msg if result is True: p('ok') elif not result: p('not found', 'YELLOW') else: p(result) @feature('nolink') def nolink(self): '''using the nolink type in conf.check() allows us to avoid the link stage of a test, thus speeding it up for tests where linking is not needed''' pass def CHECK_HEADER(conf, h, add_headers=False, lib=None): '''check for a header''' if h in missing_headers and lib is None: return False d = h.upper().replace('/', '_') d = d.replace('.', '_') d = d.replace('-', '_') d = 'HAVE_%s' % d if CONFIG_SET(conf, d): if add_headers: if not h in conf.env.hlist: conf.env.hlist.append(h) return True (ccflags, ldflags, cpppath) = library_flags(conf, lib) hdrs = hlist_to_string(conf, headers=h) if lib is None: lib = "" ret = conf.check(fragment='%s\nint main(void) { return 0; }\n' % hdrs, type='nolink', execute=0, cflags=ccflags, mandatory=False, includes=cpppath, uselib=lib.upper(), msg="Checking for header %s" % h) if not ret: missing_headers.add(h) return False conf.DEFINE(d, 1) if add_headers and not h in conf.env.hlist: conf.env.hlist.append(h) return ret @conf def CHECK_HEADERS(conf, headers, add_headers=False, together=False, lib=None): '''check for a list of headers. When together==True, the headers accumulate within this test. 
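For example (illustrative): CHECK_HEADERS(conf, 'sys/types.h sys/socket.h', together=True) tests sys/socket.h with sys/types.h already on the accumulated include list, which matters on platforms whose socket headers are not self-contained. 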
This is useful for interdependent headers ''' ret = True if not add_headers and together: saved_hlist = conf.env.hlist[:] set_add_headers = True else: set_add_headers = add_headers for hdr in TO_LIST(headers): if not CHECK_HEADER(conf, hdr, set_add_headers, lib=lib): ret = False if not add_headers and together: conf.env.hlist = saved_hlist return ret def header_list(conf, headers=None, lib=None): '''form a list of headers which exist, as a string''' hlist=[] if headers is not None: for h in TO_LIST(headers): if CHECK_HEADER(conf, h, add_headers=False, lib=lib): hlist.append(h) return hlist_to_string(conf, headers=hlist) @conf def CHECK_TYPE(conf, t, alternate=None, headers=None, define=None, lib=None, msg=None): '''check for a single type''' if define is None: define = 'HAVE_' + t.upper().replace(' ', '_') if msg is None: msg='Checking for %s' % t ret = CHECK_CODE(conf, '%s _x' % t, define, execute=False, headers=headers, local_include=False, msg=msg, lib=lib, link=False) if not ret and alternate: conf.DEFINE(t, alternate) return ret @conf def CHECK_TYPES(conf, list, headers=None, define=None, alternate=None, lib=None): '''check for a list of types''' ret = True for t in TO_LIST(list): if not CHECK_TYPE(conf, t, headers=headers, define=define, alternate=alternate, lib=lib): ret = False return ret @conf def CHECK_TYPE_IN(conf, t, headers=None, alternate=None, define=None): '''check for a single type with a header''' return CHECK_TYPE(conf, t, headers=headers, alternate=alternate, define=define) @conf def CHECK_VARIABLE(conf, v, define=None, always=False, headers=None, msg=None, lib=None): '''check for a variable declaration (or define)''' if define is None: define = 'HAVE_%s' % v.upper() if msg is None: msg="Checking for variable %s" % v return CHECK_CODE(conf, # we need to make sure the compiler doesn't # optimize it out... 
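    # (taking the variable's address and returning it creates a real
    # reference to the symbol, so the optimizer cannot discard it and
    # the compile test stays meaningful)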
                      '''
                      #ifndef %s
                      void *_x;
                      _x=(void *)&%s;
                      return (int)_x;
                      #endif
                      return 0
                      ''' % (v, v),
                      execute=False,
                      link=False,
                      msg=msg,
                      local_include=False,
                      lib=lib,
                      headers=headers,
                      define=define,
                      always=always)

@conf
def CHECK_DECLS(conf, vars, reverse=False, headers=None, always=False):
    '''check a list of variable declarations, using the HAVE_DECL_xxx form
       of define

       When reverse==True then use HAVE_xxx_DECL instead of HAVE_DECL_xxx
    '''
    ret = True
    for v in TO_LIST(vars):
        if not reverse:
            define = 'HAVE_DECL_%s' % v.upper()
        else:
            define = 'HAVE_%s_DECL' % v.upper()
        if not CHECK_VARIABLE(conf, v,
                              define=define,
                              headers=headers,
                              msg='Checking for declaration of %s' % v,
                              always=always):
            if not CHECK_CODE(conf,
                              '''
                              return (int)%s;
                              ''' % (v),
                              execute=False,
                              link=False,
                              msg='Checking for declaration of %s (as enum)' % v,
                              local_include=False,
                              headers=headers,
                              define=define,
                              always=always):
                ret = False
    return ret

def CHECK_FUNC(conf, f, link=True, lib=None, headers=None):
    '''check for a function'''
    define = 'HAVE_%s' % f.upper()
    ret = False
    in_lib_str = ""
    if lib:
        in_lib_str = " in %s" % lib
    conf.COMPOUND_START('Checking for %s%s' % (f, in_lib_str))

    if link is None or link:
        ret = CHECK_CODE(conf,
                         # this is based on the autoconf strategy
                         '''
                         #define %s __fake__%s
                         #ifdef HAVE_LIMITS_H
                         # include <limits.h>
                         #else
                         # include <assert.h>
                         #endif
                         #undef %s
                         #if defined __stub_%s || defined __stub___%s
                         #error "bad glibc stub"
                         #endif
                         extern char %s();
                         int main() { return %s(); }
                         ''' % (f, f, f, f, f, f, f),
                         execute=False,
                         link=True,
                         addmain=False,
                         add_headers=False,
                         define=define,
                         local_include=False,
                         lib=lib,
                         headers=headers,
                         msg='Checking for %s' % f)

        if not ret:
            ret = CHECK_CODE(conf,
                             # it might be a macro
                             # we need to make sure the compiler doesn't
                             # optimize it out...
                             'void *__x = (void *)%s; return (int)__x' % f,
                             execute=False,
                             link=True,
                             addmain=True,
                             add_headers=True,
                             define=define,
                             local_include=False,
                             lib=lib,
                             headers=headers,
                             msg='Checking for macro %s' % f)

    if not ret and (link is None or not link):
        ret = CHECK_VARIABLE(conf, f,
                             define=define,
                             headers=headers,
                             msg='Checking for declaration of %s' % f)

    conf.COMPOUND_END(ret)
    return ret

@conf
def CHECK_FUNCS(conf, list, link=True, lib=None, headers=None):
    '''check for a list of functions'''
    ret = True
    for f in TO_LIST(list):
        if not CHECK_FUNC(conf, f, link=link, lib=lib, headers=headers):
            ret = False
    return ret

@conf
def CHECK_SIZEOF(conf, vars, headers=None, define=None, critical=True):
    '''check the size of a type'''
    for v in TO_LIST(vars):
        v_define = define
        ret = False
        if v_define is None:
            v_define = 'SIZEOF_%s' % v.upper().replace(' ', '_')
        for size in list((1, 2, 4, 8, 16, 32, 64)):
            if CHECK_CODE(conf,
                          'static int test_array[1 - 2 * !(((long int)(sizeof(%s))) <= %d)];' % (v, size),
                          define=v_define,
                          quote=False,
                          headers=headers,
                          local_include=False,
                          msg="Checking if size of %s == %d" % (v, size)):
                conf.DEFINE(v_define, size)
                ret = True
                break
        if not ret and critical:
            Logs.error("Couldn't determine size of '%s'" % v)
            sys.exit(1)
    return ret

@conf
def CHECK_VALUEOF(conf, v, headers=None, define=None):
    '''check the value of a variable/define'''
    ret = True
    v_define = define
    if v_define is None:
        v_define = 'VALUEOF_%s' % v.upper().replace(' ', '_')
    if CHECK_CODE(conf,
                  'printf("%%u", (unsigned)(%s))' % v,
                  define=v_define,
                  execute=True,
                  define_ret=True,
                  quote=False,
                  headers=headers,
                  local_include=False,
                  msg="Checking value of %s" % v):
        return int(conf.env[v_define])
    return None

@conf
def CHECK_CODE(conf, code, define,
               always=False, execute=False, addmain=True,
               add_headers=True, mandatory=False,
headers=None, msg=None, cflags='', includes='# .', local_include=True, lib=None, link=True, define_ret=False, quote=False, on_target=True, strict=False): '''check if some code compiles and/or runs''' if CONFIG_SET(conf, define): return True if headers is not None: CHECK_HEADERS(conf, headers=headers, lib=lib) if add_headers: hdrs = header_list(conf, headers=headers, lib=lib) else: hdrs = '' if execute: execute = 1 else: execute = 0 if addmain: fragment='%s\n int main(void) { %s; return 0; }\n' % (hdrs, code) else: fragment='%s\n%s\n' % (hdrs, code) if msg is None: msg="Checking for %s" % define cflags = TO_LIST(cflags) # Be strict when relying on a compiler check # Some compilers (e.g. xlc) ignore non-supported features as warnings if strict: if 'WERROR_CFLAGS' in conf.env: cflags.extend(conf.env['WERROR_CFLAGS']) if local_include: cflags.append('-I%s' % conf.path.abspath()) if not link: type='nolink' else: type='cprogram' uselib = TO_LIST(lib) (ccflags, ldflags, cpppath) = library_flags(conf, uselib) includes = TO_LIST(includes) includes.extend(cpppath) uselib = [l.upper() for l in uselib] cflags.extend(ccflags) if on_target: exec_args = conf.SAMBA_CROSS_ARGS(msg=msg) else: exec_args = [] conf.COMPOUND_START(msg) try: ret = conf.check(fragment=fragment, execute=execute, define_name = define, cflags=cflags, ldflags=ldflags, includes=includes, uselib=uselib, type=type, msg=msg, quote=quote, exec_args=exec_args, define_ret=define_ret) except Exception: if always: conf.DEFINE(define, 0) else: conf.undefine(define) conf.COMPOUND_END(False) if mandatory: raise return False else: # Success is indicated by ret but we should unset # defines set by WAF's c_config.check() because it # defines it to int(ret) and we want to undefine it if not ret: conf.undefine(define) conf.COMPOUND_END(False) return False if not define_ret: conf.DEFINE(define, 1) conf.COMPOUND_END(True) else: conf.DEFINE(define, ret, quote=quote) conf.COMPOUND_END(ret) return True @conf def CHECK_STRUCTURE_MEMBER(conf, structname, member, always=False, define=None, headers=None, lib=None): '''check for a structure member''' if define is None: define = 'HAVE_%s' % member.upper() return CHECK_CODE(conf, '%s s; void *_x; _x=(void *)&s.%s' % (structname, member), define, execute=False, link=False, lib=lib, always=always, headers=headers, local_include=False, msg="Checking for member %s in %s" % (member, structname)) @conf def CHECK_CFLAGS(conf, cflags, fragment='int main(void) { return 0; }\n'): '''check if the given cflags are accepted by the compiler ''' check_cflags = TO_LIST(cflags) if 'WERROR_CFLAGS' in conf.env: check_cflags.extend(conf.env['WERROR_CFLAGS']) return conf.check(fragment=fragment, execute=0, mandatory=False, type='nolink', cflags=check_cflags, msg="Checking compiler accepts %s" % cflags) @conf def CHECK_LDFLAGS(conf, ldflags): '''check if the given ldflags are accepted by the linker ''' return conf.check(fragment='int main(void) { return 0; }\n', execute=0, ldflags=ldflags, mandatory=False, msg="Checking linker accepts %s" % ldflags) @conf def CONFIG_GET(conf, option): '''return True if a configuration option was found''' if (option in conf.env): return conf.env[option] else: return None @conf def CONFIG_SET(conf, option): '''return True if a configuration option was found''' if option not in conf.env: return False v = conf.env[option] if v is None: return False if v == []: return False if v == (): return False return True @conf def CONFIG_RESET(conf, option): if option not in conf.env: return del conf.env[option] 
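
# A minimal self-contained sketch (added for illustration, not part of the
# original file) of the truthiness rule CONFIG_SET() above implements:
# a missing key, None, [] and () are all treated as "not set", anything
# else counts as set. The helper and the option name are hypothetical.
def _example_config_set(env, option):
    '''hypothetical stand-in for CONFIG_SET, operating on a plain dict'''
    if option not in env:
        return False
    v = env[option]
    if v is None:
        return False
    if v == [] or v == ():
        return False
    return True

assert _example_config_set({'HAVE_FOO': 1}, 'HAVE_FOO') is True
assert _example_config_set({'HAVE_FOO': []}, 'HAVE_FOO') is False
assert _example_config_set({}, 'HAVE_FOO') is False
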
Build.BuildContext.CONFIG_RESET = CONFIG_RESET Build.BuildContext.CONFIG_SET = CONFIG_SET Build.BuildContext.CONFIG_GET = CONFIG_GET def library_flags(self, libs): '''work out flags from pkg_config''' ccflags = [] ldflags = [] cpppath = [] for lib in TO_LIST(libs): # note that we do not add the -I and -L in here, as that is added by the waf # core. Adding it here would just change the order that it is put on the link line # which can cause system paths to be added before internal libraries extra_ccflags = TO_LIST(getattr(self.env, 'CFLAGS_%s' % lib.upper(), [])) extra_ldflags = TO_LIST(getattr(self.env, 'LDFLAGS_%s' % lib.upper(), [])) extra_cpppath = TO_LIST(getattr(self.env, 'CPPPATH_%s' % lib.upper(), [])) ccflags.extend(extra_ccflags) ldflags.extend(extra_ldflags) cpppath.extend(extra_cpppath) extra_cpppath = TO_LIST(getattr(self.env, 'INCLUDES_%s' % lib.upper(), [])) cpppath.extend(extra_cpppath) if 'EXTRA_LDFLAGS' in self.env: ldflags.extend(self.env['EXTRA_LDFLAGS']) ccflags = unique_list(ccflags) ldflags = unique_list(ldflags) cpppath = unique_list(cpppath) return (ccflags, ldflags, cpppath) @conf def CHECK_LIB(conf, libs, mandatory=False, empty_decl=True, set_target=True, shlib=False): '''check if a set of libraries exist as system libraries returns the sublist of libs that do exist as a syslib or [] ''' fragment= ''' int foo() { int v = 2; return v*2; } ''' ret = [] liblist = TO_LIST(libs) for lib in liblist[:]: if GET_TARGET_TYPE(conf, lib) == 'SYSLIB': ret.append(lib) continue (ccflags, ldflags, cpppath) = library_flags(conf, lib) if shlib: res = conf.check(features='c cshlib', fragment=fragment, lib=lib, uselib_store=lib, cflags=ccflags, ldflags=ldflags, uselib=lib.upper(), mandatory=False) else: res = conf.check(lib=lib, uselib_store=lib, cflags=ccflags, ldflags=ldflags, uselib=lib.upper(), mandatory=False) if not res: if mandatory: Logs.error("Mandatory library '%s' not found for functions '%s'" % (lib, list)) sys.exit(1) if empty_decl: # if it isn't a mandatory library, then remove it from dependency lists if set_target: SET_TARGET_TYPE(conf, lib, 'EMPTY') else: conf.define('HAVE_LIB%s' % lib.upper().replace('-','_').replace('.','_'), 1) conf.env['LIB_' + lib.upper()] = lib if set_target: conf.SET_TARGET_TYPE(lib, 'SYSLIB') ret.append(lib) return ret @conf def CHECK_FUNCS_IN(conf, list, library, mandatory=False, checklibc=False, headers=None, link=True, empty_decl=True, set_target=True): """ check that the functions in 'list' are available in 'library' if they are, then make that library available as a dependency if the library is not available and mandatory==True, then raise an error. 
If the library is not available and mandatory==False, then add the library to the list of dependencies to remove from build rules optionally check for the functions first in libc """ remaining = TO_LIST(list) liblist = TO_LIST(library) # check if some already found for f in remaining[:]: if CONFIG_SET(conf, 'HAVE_%s' % f.upper()): remaining.remove(f) # see if the functions are in libc if checklibc: for f in remaining[:]: if CHECK_FUNC(conf, f, link=True, headers=headers): remaining.remove(f) if remaining == []: for lib in liblist: if GET_TARGET_TYPE(conf, lib) != 'SYSLIB' and empty_decl: SET_TARGET_TYPE(conf, lib, 'EMPTY') return True checklist = conf.CHECK_LIB(liblist, empty_decl=empty_decl, set_target=set_target) for lib in liblist[:]: if not lib in checklist and mandatory: Logs.error("Mandatory library '%s' not found for functions '%s'" % (lib, list)) sys.exit(1) ret = True for f in remaining: if not CHECK_FUNC(conf, f, lib=' '.join(checklist), headers=headers, link=link): ret = False return ret @conf def IN_LAUNCH_DIR(conf): '''return True if this rule is being run from the launch directory''' return os.path.realpath(conf.path.abspath()) == os.path.realpath(Context.launch_dir) Options.OptionsContext.IN_LAUNCH_DIR = IN_LAUNCH_DIR @conf def SAMBA_CONFIG_H(conf, path=None): '''write out config.h in the right directory''' # we don't want to produce a config.h in places like lib/replace # when we are building projects that depend on lib/replace if not IN_LAUNCH_DIR(conf): return # we need to build real code that can't be optimized away to test stack_protect_list = ['-fstack-protector-strong', '-fstack-protector'] for stack_protect_flag in stack_protect_list: flag_supported = conf.check(fragment=''' #include int main(void) { char t[100000]; while (fgets(t, sizeof(t), stdin)); return 0; } ''', execute=0, cflags=[ '-Werror', '-Wp,-D_FORTIFY_SOURCE=2', stack_protect_flag], mandatory=False, msg='Checking if compiler accepts %s' % (stack_protect_flag)) if flag_supported: conf.ADD_CFLAGS('%s' % (stack_protect_flag)) break flag_supported = conf.check(fragment=''' #include int main(void) { char t[100000]; while (fgets(t, sizeof(t), stdin)); return 0; } ''', execute=0, cflags=[ '-Werror', '-fstack-clash-protection'], mandatory=False, msg='Checking if compiler accepts -fstack-clash-protection') if flag_supported: conf.ADD_CFLAGS('-fstack-clash-protection') if Options.options.debug: conf.ADD_CFLAGS('-g', testflags=True) if Options.options.developer: conf.env.DEVELOPER_MODE = True conf.ADD_CFLAGS('-g', testflags=True) conf.ADD_CFLAGS('-Wall', testflags=True) conf.ADD_CFLAGS('-Wshadow', testflags=True) conf.ADD_CFLAGS('-Wmissing-prototypes', testflags=True) if CHECK_CODE(conf, 'struct a { int b; }; struct c { struct a d; } e = { };', 'CHECK_C99_INIT', link=False, cflags='-Wmissing-field-initializers -Werror=missing-field-initializers', msg="Checking C99 init of nested structs."): conf.ADD_CFLAGS('-Wmissing-field-initializers', testflags=True) conf.ADD_CFLAGS('-Wformat-overflow=2', testflags=True) conf.ADD_CFLAGS('-Wformat-zero-length', testflags=True) conf.ADD_CFLAGS('-Wcast-align -Wcast-qual', testflags=True) conf.ADD_CFLAGS('-fno-common', testflags=True) conf.ADD_CFLAGS('-Werror=address', testflags=True) # we add these here to ensure that -Wstrict-prototypes is not set during configure conf.ADD_CFLAGS('-Werror=strict-prototypes -Wstrict-prototypes', testflags=True) conf.ADD_CFLAGS('-Werror=write-strings -Wwrite-strings', testflags=True) conf.ADD_CFLAGS('-Werror-implicit-function-declaration', 
testflags=True) conf.ADD_CFLAGS('-Werror=pointer-arith -Wpointer-arith', testflags=True) conf.ADD_CFLAGS('-Werror=declaration-after-statement -Wdeclaration-after-statement', testflags=True) conf.ADD_CFLAGS('-Werror=return-type -Wreturn-type', testflags=True) conf.ADD_CFLAGS('-Werror=uninitialized -Wuninitialized', testflags=True) conf.ADD_CFLAGS('-Wimplicit-fallthrough', testflags=True) conf.ADD_CFLAGS('-Werror=strict-overflow -Wstrict-overflow=2', testflags=True) conf.ADD_CFLAGS('-Wformat=2 -Wno-format-y2k', testflags=True) conf.ADD_CFLAGS('-Wno-format-zero-length', testflags=True) conf.ADD_CFLAGS('-Werror=format-security -Wformat-security', testflags=True, prereq_flags='-Wformat') # This check is because for ldb_search(), a NULL format string # is not an error, but some compilers complain about that. if CHECK_CFLAGS(conf, ["-Werror=format", "-Wformat=2"], ''' int testformat(char *format, ...) __attribute__ ((format (__printf__, 1, 2))); int main(void) { testformat(0); return 0; } '''): if not 'EXTRA_CFLAGS' in conf.env: conf.env['EXTRA_CFLAGS'] = [] conf.env['EXTRA_CFLAGS'].extend(TO_LIST("-Werror=format")) if not Options.options.disable_warnings_as_errors: conf.ADD_NAMED_CFLAGS('PICKY_CFLAGS', '-Werror -Wno-error=deprecated-declarations', testflags=True) conf.ADD_NAMED_CFLAGS('PICKY_CFLAGS', '-Wno-error=tautological-compare', testflags=True) if Options.options.fatal_errors: conf.ADD_CFLAGS('-Wfatal-errors', testflags=True) if Options.options.pedantic: conf.ADD_CFLAGS('-W', testflags=True) if (Options.options.address_sanitizer or Options.options.undefined_sanitizer): conf.ADD_CFLAGS('-g -O1', testflags=True) if Options.options.address_sanitizer: conf.ADD_CFLAGS('-fno-omit-frame-pointer', testflags=True) conf.ADD_CFLAGS('-fsanitize=address', testflags=True) conf.ADD_LDFLAGS('-fsanitize=address', testflags=True) conf.env['ADDRESS_SANITIZER'] = True if Options.options.undefined_sanitizer: conf.ADD_CFLAGS('-fsanitize=undefined', testflags=True) conf.ADD_CFLAGS('-fsanitize=null', testflags=True) conf.ADD_CFLAGS('-fsanitize=alignment', testflags=True) conf.ADD_LDFLAGS('-fsanitize=undefined', testflags=True) conf.env['UNDEFINED_SANITIZER'] = True # Let people pass an additional ADDITIONAL_{CFLAGS,LDFLAGS} # environment variables which are only used the for final build. # # The CFLAGS and LDFLAGS environment variables are also # used for the configure checks which might impact their results. 
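#
# For example (illustrative only, not in the original), a packager could run
#
#   ADDITIONAL_CFLAGS='-O2 -g' ADDITIONAL_LDFLAGS='-Wl,-z,relro' ./configure
#
# and the extra flags would be applied to the final build only, after being
# validated by the CHECK_CFLAGS/CHECK_LDFLAGS calls below.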
conf.add_os_flags('ADDITIONAL_CFLAGS') if conf.env.ADDITIONAL_CFLAGS and conf.CHECK_CFLAGS(conf.env['ADDITIONAL_CFLAGS']): conf.env['EXTRA_CFLAGS'].extend(conf.env['ADDITIONAL_CFLAGS']) conf.add_os_flags('ADDITIONAL_LDFLAGS') if conf.env.ADDITIONAL_LDFLAGS and conf.CHECK_LDFLAGS(conf.env['ADDITIONAL_LDFLAGS']): conf.env['EXTRA_LDFLAGS'].extend(conf.env['ADDITIONAL_LDFLAGS']) if path is None: conf.write_config_header('default/config.h', top=True, remove=False) else: conf.write_config_header(os.path.join(conf.variant, path), remove=False) for key in conf.env.define_key: conf.undefine(key, from_env=False) conf.env.define_key = [] conf.SAMBA_CROSS_CHECK_COMPLETE() @conf def CONFIG_PATH(conf, name, default): '''setup a configurable path''' if not name in conf.env: if default[0] == '/': conf.env[name] = default else: conf.env[name] = conf.env['PREFIX'] + default @conf def ADD_NAMED_CFLAGS(conf, name, flags, testflags=False, prereq_flags=[]): '''add some CFLAGS to the command line optionally set testflags to ensure all the flags work ''' prereq_flags = TO_LIST(prereq_flags) if testflags: ok_flags=[] for f in flags.split(): if CHECK_CFLAGS(conf, [f] + prereq_flags): ok_flags.append(f) flags = ok_flags if not name in conf.env: conf.env[name] = [] conf.env[name].extend(TO_LIST(flags)) @conf def ADD_CFLAGS(conf, flags, testflags=False, prereq_flags=[]): '''add some CFLAGS to the command line optionally set testflags to ensure all the flags work ''' ADD_NAMED_CFLAGS(conf, 'EXTRA_CFLAGS', flags, testflags=testflags, prereq_flags=prereq_flags) @conf def ADD_LDFLAGS(conf, flags, testflags=False): '''add some LDFLAGS to the command line optionally set testflags to ensure all the flags work this will return the flags that are added, if any ''' if testflags: ok_flags=[] for f in flags.split(): if CHECK_LDFLAGS(conf, f): ok_flags.append(f) flags = ok_flags if not 'EXTRA_LDFLAGS' in conf.env: conf.env['EXTRA_LDFLAGS'] = [] conf.env['EXTRA_LDFLAGS'].extend(TO_LIST(flags)) return flags @conf def ADD_EXTRA_INCLUDES(conf, includes): '''add some extra include directories to all builds''' if not 'EXTRA_INCLUDES' in conf.env: conf.env['EXTRA_INCLUDES'] = [] conf.env['EXTRA_INCLUDES'].extend(TO_LIST(includes)) def CURRENT_CFLAGS(bld, target, cflags, allow_warnings=False, hide_symbols=False): '''work out the current flags. local flags are added first''' ret = TO_LIST(cflags) if not 'EXTRA_CFLAGS' in bld.env: list = [] else: list = bld.env['EXTRA_CFLAGS']; ret.extend(list) if not allow_warnings and 'PICKY_CFLAGS' in bld.env: list = bld.env['PICKY_CFLAGS']; ret.extend(list) if hide_symbols and bld.env.HAVE_VISIBILITY_ATTR: ret.append(bld.env.VISIBILITY_CFLAGS) return ret @conf def CHECK_CC_ENV(conf): """trim whitespaces from 'CC'. The build farm sometimes puts a space at the start""" if os.environ.get('CC'): conf.env.CC = TO_LIST(os.environ.get('CC')) @conf def SETUP_CONFIGURE_CACHE(conf, enable): '''enable/disable cache of configure results''' if enable: # when -C is chosen, we will use a private cache and will # not look into system includes. This roughtly matches what # autoconf does with -C cache_path = os.path.join(conf.bldnode.abspath(), '.confcache') mkdir_p(cache_path) Options.cache_global = os.environ['WAFCACHE'] = cache_path else: # when -C is not chosen we will not cache configure checks # We set the recursion limit low to prevent waf from spending # a lot of time on the signatures of the files. 
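
# Self-contained sketch (added for illustration, not from the original) of the
# testflags idiom ADD_NAMED_CFLAGS() above uses: probe each candidate flag and
# keep only the accepted ones. 'probe' stands in for CHECK_CFLAGS(conf, [f]).
def _example_filter_flags(flags, probe):
    ok_flags = []
    for f in flags.split():
        if probe(f):
            ok_flags.append(f)
    return ok_flags

assert _example_filter_flags('-Wall -Wbogus', lambda f: f == '-Wall') == ['-Wall']
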
Options.cache_global = os.environ['WAFCACHE'] = '' preproc.recursion_limit = 1 # in either case we don't need to scan system includes preproc.go_absolute = False @conf def SAMBA_CHECK_UNDEFINED_SYMBOL_FLAGS(conf): if Options.options.address_sanitizer or Options.options.enable_libfuzzer: # Sanitizers can rely on symbols undefined at library link time and the # symbols used for fuzzers are only defined by compiler wrappers. return if not sys.platform.startswith("openbsd"): # we don't want any libraries or modules to rely on runtime # resolution of symbols conf.env.undefined_ldflags = conf.ADD_LDFLAGS('-Wl,-no-undefined', testflags=True) if (conf.env.undefined_ignore_ldflags == [] and conf.CHECK_LDFLAGS(['-undefined', 'dynamic_lookup'])): conf.env.undefined_ignore_ldflags = ['-undefined', 'dynamic_lookup'] tdb-1.4.2/buildtools/wafsamba/samba_autoproto.py0000660000000000000000000000152613444661620021747 0ustar rootroot00000000000000# waf build tool for building automatic prototypes from C source import os from waflib import Build from samba_utils import SET_TARGET_TYPE, os_path_relpath def SAMBA_AUTOPROTO(bld, header, source): '''rule for samba prototype generation''' bld.SET_BUILD_GROUP('prototypes') relpath = os_path_relpath(bld.path.abspath(), bld.srcnode.abspath()) name = os.path.join(relpath, header) SET_TARGET_TYPE(bld, name, 'PROTOTYPE') t = bld( name = name, source = source, target = header, update_outputs=True, ext_out='.c', before ='c', rule = '${PERL} "${SCRIPT}/mkproto.pl" --srcdir=.. --builddir=. --public=/dev/null --private="${TGT}" ${SRC}' ) t.env.SCRIPT = os.path.join(bld.srcnode.abspath(), 'source4/script') Build.BuildContext.SAMBA_AUTOPROTO = SAMBA_AUTOPROTO tdb-1.4.2/buildtools/wafsamba/samba_bundled.py0000660000000000000000000002257213444661620021334 0ustar rootroot00000000000000# functions to support bundled libraries import sys from waflib import Build, Options, Logs from waflib.Configure import conf from wafsamba import samba_utils def PRIVATE_NAME(bld, name, private_extension, private_library): '''possibly rename a library to include a bundled extension''' if not private_library: return name # we now use the same private name for libraries as the public name. # see http://git.samba.org/?p=tridge/junkcode.git;a=tree;f=shlib for a # demonstration that this is the right thing to do # also see http://lists.samba.org/archive/samba-technical/2011-January/075816.html if private_extension: return name extension = bld.env.PRIVATE_EXTENSION if extension and name.startswith('%s' % extension): return name if extension and name.endswith('%s' % extension): return name return "%s-%s" % (name, extension) def target_in_list(target, lst, default): for l in lst: if target == l: return True if '!' 
+ target == l: return False if l == 'ALL': return True if l == 'NONE': return False return default def BUILTIN_LIBRARY(bld, name): '''return True if a library should be builtin instead of being built as a shared lib''' return target_in_list(name, bld.env.BUILTIN_LIBRARIES, False) Build.BuildContext.BUILTIN_LIBRARY = BUILTIN_LIBRARY def BUILTIN_DEFAULT(opt, builtins): '''set a comma separated default list of builtin libraries for this package''' if 'BUILTIN_LIBRARIES_DEFAULT' in Options.options.__dict__: return Options.options.__dict__['BUILTIN_LIBRARIES_DEFAULT'] = builtins Options.OptionsContext.BUILTIN_DEFAULT = BUILTIN_DEFAULT def PRIVATE_EXTENSION_DEFAULT(opt, extension, noextension=''): '''set a default private library extension''' if 'PRIVATE_EXTENSION_DEFAULT' in Options.options.__dict__: return Options.options.__dict__['PRIVATE_EXTENSION_DEFAULT'] = extension Options.options.__dict__['PRIVATE_EXTENSION_EXCEPTION'] = noextension Options.OptionsContext.PRIVATE_EXTENSION_DEFAULT = PRIVATE_EXTENSION_DEFAULT def minimum_library_version(conf, libname, default): '''allow override of mininum system library version''' minlist = Options.options.MINIMUM_LIBRARY_VERSION if not minlist: return default for m in minlist.split(','): a = m.split(':') if len(a) != 2: Logs.error("Bad syntax for --minimum-library-version of %s" % m) sys.exit(1) if a[0] == libname: return a[1] return default @conf def LIB_MAY_BE_BUNDLED(conf, libname): if libname in conf.env.SYSTEM_LIBS: return False if libname in conf.env.BUNDLED_LIBS: return True if '!%s' % libname in conf.env.BUNDLED_LIBS: return False if 'NONE' in conf.env.BUNDLED_LIBS: return False return True @conf def LIB_MUST_BE_BUNDLED(conf, libname): if libname in conf.env.BUNDLED_LIBS: return True if '!%s' % libname in conf.env.BUNDLED_LIBS: return False if 'ALL' in conf.env.BUNDLED_LIBS: return True return False @conf def LIB_MUST_BE_PRIVATE(conf, libname): return ('ALL' in conf.env.PRIVATE_LIBS or libname in conf.env.PRIVATE_LIBS) @conf def CHECK_BUNDLED_SYSTEM_PKG(conf, libname, minversion='0.0.0', maxversion=None, version_blacklist=[], onlyif=None, implied_deps=None, pkg=None): '''check if a library is available as a system library. This only tries using pkg-config ''' return conf.CHECK_BUNDLED_SYSTEM(libname, minversion=minversion, maxversion=maxversion, version_blacklist=version_blacklist, onlyif=onlyif, implied_deps=implied_deps, pkg=pkg) @conf def CHECK_BUNDLED_SYSTEM(conf, libname, minversion='0.0.0', maxversion=None, version_blacklist=[], checkfunctions=None, headers=None, checkcode=None, onlyif=None, implied_deps=None, require_headers=True, pkg=None, set_target=True): '''check if a library is available as a system library. this first tries via pkg-config, then if that fails tries by testing for a specified function in the specified lib ''' # We always do a logic validation of 'onlyif' first missing = [] if onlyif: for l in samba_utils.TO_LIST(onlyif): f = 'FOUND_SYSTEMLIB_%s' % l if not f in conf.env: Logs.error('ERROR: CHECK_BUNDLED_SYSTEM(%s) - ' % (libname) + 'missing prerequisite check for ' + 'system library %s, onlyif=%r' % (l, onlyif)) sys.exit(1) if not conf.env[f]: missing.append(l) found = 'FOUND_SYSTEMLIB_%s' % libname if found in conf.env: return conf.env[found] if conf.LIB_MUST_BE_BUNDLED(libname): conf.env[found] = False return False # see if the library should only use a system version if another dependent # system version is found. 
That prevents possible use of mixed library # versions if missing: if not conf.LIB_MAY_BE_BUNDLED(libname): Logs.error('ERROR: Use of system library %s depends on missing system library/libraries %r' % (libname, missing)) sys.exit(1) conf.env[found] = False return False def check_functions_headers_code(): '''helper function for CHECK_BUNDLED_SYSTEM''' if require_headers and headers and not conf.CHECK_HEADERS(headers, lib=libname): return False if checkfunctions is not None: ok = conf.CHECK_FUNCS_IN(checkfunctions, libname, headers=headers, empty_decl=False, set_target=False) if not ok: return False if checkcode is not None: define='CHECK_BUNDLED_SYSTEM_%s' % libname.upper() ok = conf.CHECK_CODE(checkcode, lib=libname, headers=headers, local_include=False, msg=msg, define=define) conf.CONFIG_RESET(define) if not ok: return False return True minversion = minimum_library_version(conf, libname, minversion) msg = 'Checking for system %s' % libname msg_ver = [] if minversion != '0.0.0': msg_ver.append('>=%s' % minversion) if maxversion is not None: msg_ver.append('<=%s' % maxversion) for v in version_blacklist: msg_ver.append('!=%s' % v) if msg_ver != []: msg += " (%s)" % (" ".join(msg_ver)) uselib_store=libname.upper() if pkg is None: pkg = libname version_checks = '%s >= %s' % (pkg, minversion) if maxversion is not None: version_checks += ' %s <= %s' % (pkg, maxversion) for v in version_blacklist: version_checks += ' %s != %s' % (pkg, v) # try pkgconfig first if (conf.CHECK_CFG(package=pkg, args='"%s" --cflags --libs' % (version_checks), msg=msg, uselib_store=uselib_store) and check_functions_headers_code()): if set_target: conf.SET_TARGET_TYPE(libname, 'SYSLIB') conf.env[found] = True if implied_deps: conf.SET_SYSLIB_DEPS(libname, implied_deps) return True if checkfunctions is not None: if check_functions_headers_code(): conf.env[found] = True if implied_deps: conf.SET_SYSLIB_DEPS(libname, implied_deps) if set_target: conf.SET_TARGET_TYPE(libname, 'SYSLIB') return True conf.env[found] = False if not conf.LIB_MAY_BE_BUNDLED(libname): Logs.error('ERROR: System library %s of version %s not found, and bundling disabled' % (libname, minversion)) sys.exit(1) return False def tuplize_version(version): return tuple([int(x) for x in version.split(".")]) @conf def CHECK_BUNDLED_SYSTEM_PYTHON(conf, libname, modulename, minversion='0.0.0'): '''check if a python module is available on the system and has the specified minimum version. ''' if conf.LIB_MUST_BE_BUNDLED(libname): return False # see if the library should only use a system version if another dependent # system version is found. 
That prevents possible use of mixed library # versions minversion = minimum_library_version(conf, libname, minversion) try: m = __import__(modulename) except ImportError: found = False else: try: version = m.__version__ except AttributeError: found = False else: found = tuplize_version(version) >= tuplize_version(minversion) if not found and not conf.LIB_MAY_BE_BUNDLED(libname): Logs.error('ERROR: Python module %s of version %s not found, and bundling disabled' % (libname, minversion)) sys.exit(1) return found def NONSHARED_BINARY(bld, name): '''return True if a binary should be built without non-system shared libs''' return target_in_list(name, bld.env.NONSHARED_BINARIES, False) Build.BuildContext.NONSHARED_BINARY = NONSHARED_BINARY tdb-1.4.2/buildtools/wafsamba/samba_conftests.py0000660000000000000000000004025213444661620021722 0ustar rootroot00000000000000# a set of config tests that use the samba_autoconf functions # to test for commonly needed configuration options import os, shutil, re from waflib import Build, Configure, Utils, Options, Logs, Errors from waflib.Configure import conf from samba_utils import TO_LIST, ADD_LD_LIBRARY_PATH, get_string def add_option(self, *k, **kw): '''syntax help: provide the "match" attribute to opt.add_option() so that folders can be added to specific config tests''' Options.OptionsContext.parser = self match = kw.get('match', []) if match: del kw['match'] opt = self.parser.add_option(*k, **kw) opt.match = match return opt Options.OptionsContext.add_option = add_option @conf def check(self, *k, **kw): '''Override the waf defaults to inject --with-directory options''' if not 'env' in kw: kw['env'] = self.env.derive() # match the configuration test with specific options, for example: # --with-libiconv -> Options.options.iconv_open -> "Checking for library iconv" additional_dirs = [] if 'msg' in kw: msg = kw['msg'] for x in Options.OptionsContext.parser.parser.option_list: if getattr(x, 'match', None) and msg in x.match: d = getattr(Options.options, x.dest, '') if d: additional_dirs.append(d) # we add the additional dirs twice: once for the test data, and again if the compilation test suceeds below def add_options_dir(dirs, env): for x in dirs: if not x in env.CPPPATH: env.CPPPATH = [os.path.join(x, 'include')] + env.CPPPATH if not x in env.LIBPATH: env.LIBPATH = [os.path.join(x, 'lib')] + env.LIBPATH add_options_dir(additional_dirs, kw['env']) self.validate_c(kw) self.start_msg(kw['msg']) ret = None try: ret = self.run_c_code(*k, **kw) except Configure.ConfigurationError as e: self.end_msg(kw['errmsg'], 'YELLOW') if 'mandatory' in kw and kw['mandatory']: if Logs.verbose > 1: raise else: self.fatal('the configuration failed (see %r)' % self.log.name) else: kw['success'] = ret self.end_msg(self.ret_msg(kw['okmsg'], kw)) # success! 
keep the CPPPATH/LIBPATH add_options_dir(additional_dirs, self.env) self.post_check(*k, **kw) if not kw.get('execute', False): return ret == 0 return ret @conf def CHECK_ICONV(conf, define='HAVE_NATIVE_ICONV'): '''check if the iconv library is installed optionally pass a define''' if conf.CHECK_FUNCS_IN('iconv_open', 'iconv', checklibc=True, headers='iconv.h'): conf.DEFINE(define, 1) return True return False @conf def CHECK_LARGEFILE(conf, define='HAVE_LARGEFILE'): '''see what we need for largefile support''' getconf_cflags = conf.CHECK_COMMAND(['getconf', 'LFS_CFLAGS']); if getconf_cflags is not False: if (conf.CHECK_CODE('if (sizeof(off_t) < 8) return 1', define='WORKING_GETCONF_LFS_CFLAGS', execute=True, cflags=getconf_cflags, msg='Checking getconf large file support flags work')): conf.ADD_CFLAGS(getconf_cflags) getconf_cflags_list=TO_LIST(getconf_cflags) for flag in getconf_cflags_list: if flag[:2] == "-D": flag_split = flag[2:].split('=') if len(flag_split) == 1: conf.DEFINE(flag_split[0], '1') else: conf.DEFINE(flag_split[0], flag_split[1]) if conf.CHECK_CODE('if (sizeof(off_t) < 8) return 1', define, execute=True, msg='Checking for large file support without additional flags'): return True if conf.CHECK_CODE('if (sizeof(off_t) < 8) return 1', define, execute=True, cflags='-D_FILE_OFFSET_BITS=64', msg='Checking for -D_FILE_OFFSET_BITS=64'): conf.DEFINE('_FILE_OFFSET_BITS', 64) return True if conf.CHECK_CODE('if (sizeof(off_t) < 8) return 1', define, execute=True, cflags='-D_LARGE_FILES', msg='Checking for -D_LARGE_FILES'): conf.DEFINE('_LARGE_FILES', 1) return True return False @conf def CHECK_C_PROTOTYPE(conf, function, prototype, define, headers=None, msg=None): '''verify that a C prototype matches the one on the current system''' if not conf.CHECK_DECLS(function, headers=headers): return False if not msg: msg = 'Checking C prototype for %s' % function return conf.CHECK_CODE('%s; void *_x = (void *)%s' % (prototype, function), define=define, local_include=False, headers=headers, link=False, execute=False, msg=msg) @conf def CHECK_CHARSET_EXISTS(conf, charset, outcharset='UCS-2LE', headers=None, define=None): '''check that a named charset is able to be used with iconv_open() for conversion to a target charset ''' msg = 'Checking if can we convert from %s to %s' % (charset, outcharset) if define is None: define = 'HAVE_CHARSET_%s' % charset.upper().replace('-','_') return conf.CHECK_CODE(''' iconv_t cd = iconv_open("%s", "%s"); if (cd == 0 || cd == (iconv_t)-1) return -1; ''' % (charset, outcharset), define=define, execute=True, msg=msg, lib='iconv', headers=headers) def find_config_dir(conf): '''find a directory to run tests in''' k = 0 while k < 10000: dir = os.path.join(conf.bldnode.abspath(), '.conf_check_%d' % k) try: shutil.rmtree(dir) except OSError: pass try: os.stat(dir) except: break k += 1 try: os.makedirs(dir) except: conf.fatal('cannot create a configuration test folder %r' % dir) try: os.stat(dir) except: conf.fatal('cannot use the configuration test folder %r' % dir) return dir @conf def CHECK_SHLIB_INTRASINC_NAME_FLAGS(conf, msg): ''' check if the waf default flags for setting the name of lib are ok ''' snip = ''' int foo(int v) { return v * 2; } ''' return conf.check(features='c cshlib',vnum="1",fragment=snip,msg=msg, mandatory=False) @conf def CHECK_NEED_LC(conf, msg): '''check if we need -lc''' dir = find_config_dir(conf) env = conf.env bdir = os.path.join(dir, 'testbuild2') if not os.path.exists(bdir): os.makedirs(bdir) subdir = os.path.join(dir, "liblctest") 
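    # (comment added for illustration, not in the original: the test below
    # builds a tiny shared library whose code calls fopen() without linking
    # libc explicitly; if that link fails, the platform needs an explicit -lc)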
    os.makedirs(subdir)

    Utils.writef(os.path.join(subdir, 'liblc1.c'),
                 '#include <stdio.h>\nint lib_func(void) { FILE *f = fopen("foo", "r");}\n')

    bld = Build.BuildContext()
    bld.log = conf.log
    bld.all_envs.update(conf.all_envs)
    bld.all_envs['default'] = env
    bld.lst_variants = bld.all_envs.keys()
    bld.load_dirs(dir, bdir)
    bld.rescan(bld.srcnode)

    bld(features='c cshlib',
        source='liblctest/liblc1.c',
        ldflags=conf.env['EXTRA_LDFLAGS'],
        target='liblc',
        name='liblc')

    try:
        bld.compile()
        conf.check_message(msg, '', True)
        return True
    except:
        conf.check_message(msg, '', False)
        return False

@conf
def CHECK_SHLIB_W_PYTHON(conf, msg):
    '''check if we need -undefined dynamic_lookup'''

    dir = find_config_dir(conf)
    snip = '''
#include <Python.h>
#include <crt_externs.h>
#define environ (*_NSGetEnviron())

static PyObject *ldb_module = NULL;

int foo(int v) {
    extern char **environ;
    environ[0] = 1;
    ldb_module = PyImport_ImportModule("ldb");
    return v * 2;
}
'''
    return conf.check(features='c cshlib', uselib='PYEMBED', fragment=snip, msg=msg, mandatory=False)

# this one is quite complex, and should probably be broken up
# into several parts. I'd quite like to create a set of CHECK_COMPOUND()
# functions that make writing complex compound tests like this much easier

@conf
def CHECK_LIBRARY_SUPPORT(conf, rpath=False, version_script=False, msg=None):
    '''see if the platform supports building libraries'''

    if msg is None:
        if rpath:
            msg = "rpath library support"
        else:
            msg = "building library support"

    dir = find_config_dir(conf)

    bdir = os.path.join(dir, 'testbuild')
    if not os.path.exists(bdir):
        os.makedirs(bdir)

    env = conf.env

    subdir = os.path.join(dir, "libdir")

    os.makedirs(subdir)

    Utils.writef(os.path.join(subdir, 'lib1.c'), 'int lib_func(void) { return 42; }\n')
    Utils.writef(os.path.join(dir, 'main.c'),
                 'int lib_func(void);\n'
                 'int main(void) {return !(lib_func() == 42);}\n')

    bld = Build.BuildContext()
    bld.log = conf.log
    bld.all_envs.update(conf.all_envs)
    bld.all_envs['default'] = env
    bld.lst_variants = bld.all_envs.keys()
    bld.load_dirs(dir, bdir)
    bld.rescan(bld.srcnode)

    ldflags = []
    if version_script:
        ldflags.append("-Wl,--version-script=%s/vscript" % bld.path.abspath())
        Utils.writef(os.path.join(dir, 'vscript'), 'TEST_1.0A2 { global: *; };\n')

    bld(features='c cshlib',
        source='libdir/lib1.c',
        target='libdir/lib1',
        ldflags=ldflags,
        name='lib1')

    o = bld(features='c cprogram',
            source='main.c',
            target='prog1',
            uselib_local='lib1')

    if rpath:
        o.rpath = os.path.join(bdir, 'default/libdir')

    # compile the program
    try:
        bld.compile()
    except:
        conf.check_message(msg, '', False)
        return False

    # path for execution
    lastprog = o.link_task.outputs[0].abspath(env)

    if not rpath:
        if 'LD_LIBRARY_PATH' in os.environ:
            old_ld_library_path = os.environ['LD_LIBRARY_PATH']
        else:
            old_ld_library_path = None
        ADD_LD_LIBRARY_PATH(os.path.join(bdir, 'default/libdir'))

    # we need to run the program, try to get its result
    args = conf.SAMBA_CROSS_ARGS(msg=msg)
    proc = Utils.subprocess.Popen([lastprog] + args,
                                  stdout=Utils.subprocess.PIPE,
                                  stderr=Utils.subprocess.PIPE)
    (out, err) = proc.communicate()
    w = conf.log.write
    w(str(out))
    w('\n')
    w(str(err))
    w('\nreturncode %r\n' % proc.returncode)
    ret = (proc.returncode == 0)

    if not rpath:
        os.environ['LD_LIBRARY_PATH'] = old_ld_library_path or ''

    conf.check_message(msg, '', ret)
    return ret

@conf
def CHECK_PERL_MANPAGE(conf, msg=None, section=None):
    '''work out what extension perl uses for manpages'''

    if msg is None:
        if section:
            msg = "perl man%s extension" % section
        else:
            msg = "perl manpage generation"

    conf.start_msg(msg)

    dir = find_config_dir(conf)

    bdir =
os.path.join(dir, 'testbuild') if not os.path.exists(bdir): os.makedirs(bdir) Utils.writef(os.path.join(bdir, 'Makefile.PL'), """ use ExtUtils::MakeMaker; WriteMakefile( 'NAME' => 'WafTest', 'EXE_FILES' => [ 'WafTest' ] ); """) back = os.path.abspath('.') os.chdir(bdir) proc = Utils.subprocess.Popen(['perl', 'Makefile.PL'], stdout=Utils.subprocess.PIPE, stderr=Utils.subprocess.PIPE) (out, err) = proc.communicate() os.chdir(back) ret = (proc.returncode == 0) if not ret: conf.end_msg('not found', color='YELLOW') return if section: man = Utils.readf(os.path.join(bdir,'Makefile')) m = re.search('MAN%sEXT\s+=\s+(\w+)' % section, man) if not m: conf.end_msg('not found', color='YELLOW') return ext = m.group(1) conf.end_msg(ext) return ext conf.end_msg('ok') return True @conf def CHECK_COMMAND(conf, cmd, msg=None, define=None, on_target=True, boolean=False): '''run a command and return result''' if msg is None: msg = 'Checking %s' % ' '.join(cmd) conf.COMPOUND_START(msg) cmd = cmd[:] if on_target: cmd.extend(conf.SAMBA_CROSS_ARGS(msg=msg)) try: ret = get_string(Utils.cmd_output(cmd)) except: conf.COMPOUND_END(False) return False if boolean: conf.COMPOUND_END('ok') if define: conf.DEFINE(define, '1') else: ret = ret.strip() conf.COMPOUND_END(ret) if define: conf.DEFINE(define, ret, quote=True) return ret @conf def CHECK_UNAME(conf): '''setup SYSTEM_UNAME_* defines''' ret = True for v in "sysname machine release version".split(): if not conf.CHECK_CODE(''' int printf(const char *format, ...); struct utsname n; if (uname(&n) == -1) return -1; printf("%%s", n.%s); ''' % v, define='SYSTEM_UNAME_%s' % v.upper(), execute=True, define_ret=True, quote=True, headers='sys/utsname.h', local_include=False, msg="Checking uname %s type" % v): ret = False return ret @conf def CHECK_INLINE(conf): '''check for the right value for inline''' conf.COMPOUND_START('Checking for inline') for i in ['inline', '__inline__', '__inline']: ret = conf.CHECK_CODE(''' typedef int foo_t; static %s foo_t static_foo () {return 0; } %s foo_t foo () {return 0; }\n''' % (i, i), define='INLINE_MACRO', addmain=False, link=False) if ret: if i != 'inline': conf.DEFINE('inline', i, quote=False) break if not ret: conf.COMPOUND_END(ret) else: conf.COMPOUND_END(i) return ret @conf def CHECK_XSLTPROC_MANPAGES(conf): '''check if xsltproc can run with the given stylesheets''' if not conf.CONFIG_SET('XSLTPROC'): conf.find_program('xsltproc', var='XSLTPROC') if not conf.CONFIG_SET('XSLTPROC'): return False s='http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl' conf.CHECK_COMMAND('%s --nonet %s 2> /dev/null' % (conf.env.get_flat('XSLTPROC'), s), msg='Checking for stylesheet %s' % s, define='XSLTPROC_MANPAGES', on_target=False, boolean=True) if not conf.CONFIG_SET('XSLTPROC_MANPAGES'): print("A local copy of the docbook.xsl wasn't found on your system" \ " consider installing package like docbook-xsl") # # Determine the standard libpath for the used compiler, # so we can later use that to filter out these standard # library paths when some tools like cups-config or # python-config report standard lib paths with their # ldflags (-L...) 
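#
# For illustration (added here, not in the original): gcc's
# "-print-search-dirs" output contains a line of the form
#   libraries: =/usr/lib/gcc/x86_64-linux-gnu/12:/usr/lib:/lib
# (the exact paths vary by system) and CHECK_STANDARD_LIBPATH below keeps the
# normalized directory list found after the "libraries: =" prefix.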
# @conf def CHECK_STANDARD_LIBPATH(conf): # at least gcc and clang support this: try: cmd = conf.env.CC + ['-print-search-dirs'] out = get_string(Utils.cmd_output(cmd)).split('\n') except ValueError: # option not supported by compiler - use a standard list of directories dirlist = [ '/usr/lib', '/usr/lib64' ] except: raise Errors.WafError('Unexpected error running "%s"' % (cmd)) else: dirlist = [] for line in out: line = line.strip() if line.startswith("libraries: ="): dirliststr = line[len("libraries: ="):] dirlist = [ os.path.normpath(x) for x in dirliststr.split(':') ] break conf.env.STANDARD_LIBPATH = dirlist tdb-1.4.2/buildtools/wafsamba/samba_cross.py0000660000000000000000000001334413444661620021045 0ustar rootroot00000000000000# functions for handling cross-compilation import os, sys, re, shlex from waflib import Utils, Logs, Options, Errors, Context from waflib.Configure import conf from wafsamba import samba_utils real_Popen = None ANSWER_UNKNOWN = (254, "") ANSWER_NO = (1, "") ANSWER_OK = (0, "") cross_answers_incomplete = False def add_answer(ca_file, msg, answer): '''add an answer to a set of cross answers''' try: f = open(ca_file, 'a') except: Logs.error("Unable to open cross-answers file %s" % ca_file) sys.exit(1) (retcode, retstring) = answer # if retstring is more than one line then we probably # don't care about its actual content (the tests should # yield one-line output in order to comply with the cross-answer # format) retstring = retstring.strip() if len(retstring.split('\n')) > 1: retstring = '' answer = (retcode, retstring) if answer == ANSWER_OK: f.write('%s: OK\n' % msg) elif answer == ANSWER_UNKNOWN: f.write('%s: UNKNOWN\n' % msg) elif answer == ANSWER_NO: f.write('%s: NO\n' % msg) else: if retcode == 0: f.write('%s: "%s"\n' % (msg, retstring)) else: f.write('%s: (%d, "%s")\n' % (msg, retcode, retstring)) f.close() def cross_answer(ca_file, msg): '''return a (retcode,retstring) tuple from a answers file''' try: f = open(ca_file, 'r') except: return ANSWER_UNKNOWN for line in f: line = line.strip() if line == '' or line[0] == '#': continue if line.find(':') != -1: a = line.split(':', 1) thismsg = a[0].strip() if thismsg != msg: continue ans = a[1].strip() if ans == "OK" or ans == "YES": f.close() return ANSWER_OK elif ans == "UNKNOWN": f.close() return ANSWER_UNKNOWN elif ans == "FAIL" or ans == "NO": f.close() return ANSWER_NO elif ans[0] == '"': f.close() return (0, ans.strip('"')) elif ans[0] == "'": f.close() return (0, ans.strip("'")) else: m = re.match('\(\s*(-?\d+)\s*,\s*\"(.*)\"\s*\)', ans) if m: f.close() return (int(m.group(1)), m.group(2)) else: raise Errors.WafError("Bad answer format '%s' in %s" % (line, ca_file)) f.close() return ANSWER_UNKNOWN class cross_Popen(Utils.subprocess.Popen): '''cross-compilation wrapper for Popen''' def __init__(*k, **kw): (obj, args) = k use_answers = False ans = ANSWER_UNKNOWN # Three possibilities: # 1. Only cross-answers - try the cross-answers file, and if # there's no corresponding answer, add to the file and mark # the configure process as unfinished. # 2. Only cross-execute - get the answer from cross-execute # 3. Both - try the cross-answers file, and if there is no # corresponding answer - use cross-execute to get an answer, # and add that answer to the file. 
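        # An answers file (illustrative sketch, not from the original; the
        # check messages shown are examples) pairs each check message with
        # its result, one per line:
        #
        #   Checking uname sysname type: "Linux"
        #   Checking simple C program: OK
        #   Checking for large file support: (0, "yes")
        #   rpath library support: UNKNOWN
        #
        # OK/YES, NO/FAIL, UNKNOWN, a quoted string, or a (retcode, "output")
        # tuple are the forms cross_answer() above understands.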
if '--cross-answers' in args: # when --cross-answers is set, then change the arguments # to use the cross answers if available use_answers = True i = args.index('--cross-answers') ca_file = args[i+1] msg = args[i+2] ans = cross_answer(ca_file, msg) if '--cross-execute' in args and ans == ANSWER_UNKNOWN: # when --cross-execute is set, then change the arguments # to use the cross emulator i = args.index('--cross-execute') newargs = shlex.split(args[i+1]) newargs.extend(args[0:i]) if use_answers: p = real_Popen(newargs, stdout=Utils.subprocess.PIPE, stderr=Utils.subprocess.PIPE) ce_out, ce_err = p.communicate() ans = (p.returncode, samba_utils.get_string(ce_out)) add_answer(ca_file, msg, ans) else: args = newargs if use_answers: if ans == ANSWER_UNKNOWN: global cross_answers_incomplete cross_answers_incomplete = True add_answer(ca_file, msg, ans) (retcode, retstring) = ans args = ['/bin/sh', '-c', "echo -n '%s'; exit %d" % (retstring, retcode)] real_Popen.__init__(*(obj, args), **kw) @conf def SAMBA_CROSS_ARGS(conf, msg=None): '''get exec_args to pass when running cross compiled binaries''' if not conf.env.CROSS_COMPILE: return [] global real_Popen if real_Popen is None: real_Popen = Utils.subprocess.Popen Utils.subprocess.Popen = cross_Popen ret = [] if conf.env.CROSS_EXECUTE: ret.extend(['--cross-execute', conf.env.CROSS_EXECUTE]) if conf.env.CROSS_ANSWERS: if msg is None: raise Errors.WafError("Cannot have NULL msg in cross-answers") ret.extend(['--cross-answers', os.path.join(Context.launch_dir, conf.env.CROSS_ANSWERS), msg]) if ret == []: raise Errors.WafError("Cannot cross-compile without either --cross-execute or --cross-answers") return ret @conf def SAMBA_CROSS_CHECK_COMPLETE(conf): '''check if we have some unanswered questions''' global cross_answers_incomplete if conf.env.CROSS_COMPILE and cross_answers_incomplete: raise Errors.WafError("Cross answers file %s is incomplete" % conf.env.CROSS_ANSWERS) return True tdb-1.4.2/buildtools/wafsamba/samba_deps.py0000660000000000000000000012176313527011454020650 0ustar rootroot00000000000000# Samba automatic dependency handling and project rules import os, sys, re from waflib import Build, Options, Logs, Utils, Errors from waflib.Logs import debug from waflib.Configure import conf from waflib import ConfigSet from samba_bundled import BUILTIN_LIBRARY from samba_utils import LOCAL_CACHE, TO_LIST, get_tgt_list, unique_list, os_path_relpath from samba_autoconf import library_flags @conf def ADD_GLOBAL_DEPENDENCY(ctx, dep): '''add a dependency for all binaries and libraries''' if not 'GLOBAL_DEPENDENCIES' in ctx.env: ctx.env.GLOBAL_DEPENDENCIES = [] ctx.env.GLOBAL_DEPENDENCIES.append(dep) @conf def BREAK_CIRCULAR_LIBRARY_DEPENDENCIES(ctx): '''indicate that circular dependencies between libraries should be broken.''' ctx.env.ALLOW_CIRCULAR_LIB_DEPENDENCIES = True @conf def SET_SYSLIB_DEPS(conf, target, deps): '''setup some implied dependencies for a SYSLIB''' cache = LOCAL_CACHE(conf, 'SYSLIB_DEPS') cache[target] = deps def expand_subsystem_deps(bld): '''expand the reverse dependencies resulting from subsystem attributes of modules. 
This is walking over the complete list of declared subsystems, and expands the samba_deps_extended list for any module<->subsystem dependencies''' subsystem_list = LOCAL_CACHE(bld, 'INIT_FUNCTIONS') targets = LOCAL_CACHE(bld, 'TARGET_TYPE') for subsystem_name in subsystem_list: bld.ASSERT(subsystem_name in targets, "Subsystem target %s not declared" % subsystem_name) type = targets[subsystem_name] if type == 'DISABLED' or type == 'EMPTY': continue # for example, # subsystem_name = dcerpc_server (a subsystem) # subsystem = dcerpc_server (a subsystem object) # module_name = rpc_epmapper (a module within the dcerpc_server subsystem) # module = rpc_epmapper (a module object within the dcerpc_server subsystem) subsystem = bld.get_tgen_by_name(subsystem_name) bld.ASSERT(subsystem is not None, "Unable to find subsystem %s" % subsystem_name) for d in subsystem_list[subsystem_name]: module_name = d['TARGET'] module_type = targets[module_name] if module_type in ['DISABLED', 'EMPTY']: continue bld.ASSERT(subsystem is not None, "Subsystem target %s for %s (%s) not found" % (subsystem_name, module_name, module_type)) if module_type in ['SUBSYSTEM']: # if a module is a plain object type (not a library) then the # subsystem it is part of needs to have it as a dependency, so targets # that depend on this subsystem get the modules of that subsystem subsystem.samba_deps_extended.append(module_name) subsystem.samba_deps_extended = unique_list(subsystem.samba_deps_extended) def build_dependencies(self): '''This builds the dependency list for a target. It runs after all the targets are declared The reason this is not just done in the SAMBA_*() rules is that we have no way of knowing the full dependency list for a target until we have all of the targets declared. ''' if self.samba_type in ['LIBRARY', 'BINARY', 'PYTHON']: self.uselib = list(self.final_syslibs) self.uselib_local = list(self.final_libs) self.add_objects = list(self.final_objects) # extra link flags from pkg_config libs = self.final_syslibs.copy() (cflags, ldflags, cpppath) = library_flags(self, list(libs)) new_ldflags = getattr(self, 'samba_ldflags', [])[:] new_ldflags.extend(ldflags) self.ldflags = new_ldflags if getattr(self, 'allow_undefined_symbols', False) and self.env.undefined_ldflags: for f in self.env.undefined_ldflags: self.ldflags.remove(f) if getattr(self, 'allow_undefined_symbols', False) and self.env.undefined_ignore_ldflags: for f in self.env.undefined_ignore_ldflags: self.ldflags.append(f) debug('deps: computed dependencies for target %s: uselib=%s uselib_local=%s add_objects=%s', self.sname, self.uselib, self.uselib_local, self.add_objects) if self.samba_type in ['SUBSYSTEM']: # this is needed for the cflags of libs that come from pkg_config self.uselib = list(self.final_syslibs) self.uselib.extend(list(self.direct_syslibs)) for lib in self.final_libs: t = self.bld.get_tgen_by_name(lib) self.uselib.extend(list(t.final_syslibs)) self.uselib = unique_list(self.uselib) if getattr(self, 'uselib', None): up_list = [] for l in self.uselib: up_list.append(l.upper()) self.uselib = up_list def build_includes(self): '''This builds the right set of includes for a target. One tricky part of this is that the includes= attribute for a target needs to use paths which are relative to that targets declaration directory (which we can get at via t.path). The way this works is the includes list gets added as samba_includes in the main build task declaration. 
Then this function runs after all of the tasks are declared, and it processes the samba_includes attribute to produce a includes= attribute ''' if getattr(self, 'samba_includes', None) is None: return bld = self.bld inc_deps = includes_objects(bld, self, set(), {}) includes = [] # maybe add local includes if getattr(self, 'local_include', True) and getattr(self, 'local_include_first', True): includes.append('.') includes.extend(self.samba_includes_extended) if 'EXTRA_INCLUDES' in bld.env and getattr(self, 'global_include', True): includes.extend(bld.env['EXTRA_INCLUDES']) includes.append('#') inc_set = set() inc_abs = [] for d in inc_deps: t = bld.get_tgen_by_name(d) bld.ASSERT(t is not None, "Unable to find dependency %s for %s" % (d, self.sname)) inclist = getattr(t, 'samba_includes_extended', [])[:] if getattr(t, 'local_include', True): inclist.append('.') if inclist == []: continue tpath = t.samba_abspath for inc in inclist: npath = tpath + '/' + inc if not npath in inc_set: inc_abs.append(npath) inc_set.add(npath) mypath = self.path.abspath(bld.env) for inc in inc_abs: relpath = os_path_relpath(inc, mypath) includes.append(relpath) if getattr(self, 'local_include', True) and not getattr(self, 'local_include_first', True): includes.append('.') # now transform the includes list to be relative to the top directory # which is represented by '#' in waf. This allows waf to cache the # includes lists more efficiently includes_top = [] for i in includes: if i[0] == '#': # some are already top based includes_top.append(i) continue absinc = os.path.join(self.path.abspath(), i) relinc = os_path_relpath(absinc, self.bld.srcnode.abspath()) includes_top.append('#' + relinc) self.includes = unique_list(includes_top) debug('deps: includes for target %s: includes=%s', self.sname, self.includes) def add_init_functions(self): '''This builds the right set of init functions''' bld = self.bld subsystems = LOCAL_CACHE(bld, 'INIT_FUNCTIONS') # cope with the separated object lists from BINARY and LIBRARY targets sname = self.sname if sname.endswith('.objlist'): sname = sname[0:-8] modules = [] if sname in subsystems: modules.append(sname) m = getattr(self, 'samba_modules', None) if m is not None: modules.extend(TO_LIST(m)) m = getattr(self, 'samba_subsystem', None) if m is not None: modules.append(m) if 'pyembed' in self.features: return sentinel = getattr(self, 'init_function_sentinel', 'NULL') targets = LOCAL_CACHE(bld, 'TARGET_TYPE') cflags = getattr(self, 'samba_cflags', [])[:] if modules == []: sname = sname.replace('-','_') sname = sname.replace('.','_') sname = sname.replace('/','_') cflags.append('-DSTATIC_%s_MODULES=%s' % (sname, sentinel)) if sentinel == 'NULL': proto = "extern void __%s_dummy_module_proto(void)" % (sname) cflags.append('-DSTATIC_%s_MODULES_PROTO=%s' % (sname, proto)) self.cflags = cflags return for m in modules: bld.ASSERT(m in subsystems, "No init_function defined for module '%s' in target '%s'" % (m, self.sname)) init_fn_list = [] for d in subsystems[m]: if targets[d['TARGET']] != 'DISABLED': init_fn_list.append(d['INIT_FUNCTION']) if init_fn_list == []: cflags.append('-DSTATIC_%s_MODULES=%s' % (m, sentinel)) if sentinel == 'NULL': proto = "extern void __%s_dummy_module_proto(void)" % (m) cflags.append('-DSTATIC_%s_MODULES_PROTO=%s' % (m, proto)) else: cflags.append('-DSTATIC_%s_MODULES=%s' % (m, ','.join(init_fn_list) + ',' + sentinel)) proto='' for f in init_fn_list: proto += '_MODULE_PROTO(%s)' % f proto += "extern void __%s_dummy_module_proto(void)" % (m) 
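        # (illustrative note, not in the original: for a hypothetical subsystem
        # "service" with init functions service_http_init and service_smb_init,
        # the flags built here expand to
        #   -DSTATIC_service_MODULES=service_http_init,service_smb_init,NULL
        #   -DSTATIC_service_MODULES_PROTO=_MODULE_PROTO(service_http_init)_MODULE_PROTO(service_smb_init)extern void __service_dummy_module_proto(void)
        # giving the C code a NULL-terminated table of module init functions)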
cflags.append('-DSTATIC_%s_MODULES_PROTO=%s' % (m, proto)) self.cflags = cflags def check_duplicate_sources(bld, tgt_list): '''see if we are compiling the same source file more than once''' debug('deps: checking for duplicate sources') targets = LOCAL_CACHE(bld, 'TARGET_TYPE') for t in tgt_list: source_list = TO_LIST(getattr(t, 'source', '')) tpath = os.path.normpath(os_path_relpath(t.path.abspath(bld.env), t.env.BUILD_DIRECTORY + '/default')) obj_sources = set() for s in source_list: if not isinstance(s, str): print('strange path in check_duplicate_sources %r' % s) s = s.abspath() p = os.path.normpath(os.path.join(tpath, s)) if p in obj_sources: Logs.error("ERROR: source %s appears twice in target '%s'" % (p, t.sname)) sys.exit(1) obj_sources.add(p) t.samba_source_set = obj_sources subsystems = {} # build a list of targets that each source file is part of for t in tgt_list: if not targets[t.sname] in [ 'LIBRARY', 'BINARY', 'PYTHON' ]: continue for obj in t.add_objects: t2 = t.bld.get_tgen_by_name(obj) source_set = getattr(t2, 'samba_source_set', set()) for s in source_set: if not s in subsystems: subsystems[s] = {} if not t.sname in subsystems[s]: subsystems[s][t.sname] = [] subsystems[s][t.sname].append(t2.sname) for s in subsystems: if len(subsystems[s]) > 1 and Options.options.SHOW_DUPLICATES: Logs.warn("WARNING: source %s is in more than one target: %s" % (s, subsystems[s].keys())) for tname in subsystems[s]: if len(subsystems[s][tname]) > 1: raise Errors.WafError("ERROR: source %s is in more than one subsystem of target '%s': %s" % (s, tname, subsystems[s][tname])) return True def check_group_ordering(bld, tgt_list): '''see if we have any dependencies that violate the group ordering It is an error for a target to depend on a target from a later build group ''' def group_name(g): tm = bld.task_manager return [x for x in tm.groups_names if id(tm.groups_names[x]) == id(g)][0] for g in bld.task_manager.groups: gname = group_name(g) for t in g.tasks_gen: t.samba_group = gname grp_map = {} idx = 0 for g in bld.task_manager.groups: name = group_name(g) grp_map[name] = idx idx += 1 targets = LOCAL_CACHE(bld, 'TARGET_TYPE') ret = True for t in tgt_list: tdeps = getattr(t, 'add_objects', []) + getattr(t, 'uselib_local', []) for d in tdeps: t2 = bld.get_tgen_by_name(d) if t2 is None: continue map1 = grp_map[t.samba_group] map2 = grp_map[t2.samba_group] if map2 > map1: Logs.error("Target %r in build group %r depends on target %r from later build group %r" % ( t.sname, t.samba_group, t2.sname, t2.samba_group)) ret = False return ret Build.BuildContext.check_group_ordering = check_group_ordering def show_final_deps(bld, tgt_list): '''show the final dependencies for all targets''' targets = LOCAL_CACHE(bld, 'TARGET_TYPE') for t in tgt_list: if not targets[t.sname] in ['LIBRARY', 'BINARY', 'PYTHON', 'SUBSYSTEM']: continue debug('deps: final dependencies for target %s: uselib=%s uselib_local=%s add_objects=%s', t.sname, t.uselib, getattr(t, 'uselib_local', []), getattr(t, 'add_objects', [])) def add_samba_attributes(bld, tgt_list): '''ensure a target has a the required samba attributes''' targets = LOCAL_CACHE(bld, 'TARGET_TYPE') for t in tgt_list: if t.name != '': t.sname = t.name else: t.sname = t.target t.samba_type = targets[t.sname] t.samba_abspath = t.path.abspath(bld.env) t.samba_deps_extended = t.samba_deps[:] t.samba_includes_extended = TO_LIST(t.samba_includes)[:] t.cflags = getattr(t, 'samba_cflags', '') def replace_grouping_libraries(bld, tgt_list): '''replace dependencies based on 
grouping libraries If a library is marked as a grouping library, then any target that depends on a subsystem that is part of that grouping library gets that dependency replaced with a dependency on the grouping library ''' targets = LOCAL_CACHE(bld, 'TARGET_TYPE') grouping = {} # find our list of grouping libraries, mapped from the subsystems they depend on for t in tgt_list: if not getattr(t, 'grouping_library', False): continue for dep in t.samba_deps_extended: bld.ASSERT(dep in targets, "grouping library target %s not declared in %s" % (dep, t.sname)) if targets[dep] == 'SUBSYSTEM': grouping[dep] = t.sname # now replace any dependencies on elements of grouping libraries for t in tgt_list: for i in range(len(t.samba_deps_extended)): dep = t.samba_deps_extended[i] if dep in grouping: if t.sname != grouping[dep]: debug("deps: target %s: replacing dependency %s with grouping library %s" % (t.sname, dep, grouping[dep])) t.samba_deps_extended[i] = grouping[dep] def build_direct_deps(bld, tgt_list): '''build the direct_objects and direct_libs sets for each target''' targets = LOCAL_CACHE(bld, 'TARGET_TYPE') syslib_deps = LOCAL_CACHE(bld, 'SYSLIB_DEPS') global_deps = bld.env.GLOBAL_DEPENDENCIES global_deps_exclude = set() for dep in global_deps: t = bld.get_tgen_by_name(dep) for d in t.samba_deps: # prevent loops from the global dependencies list global_deps_exclude.add(d) global_deps_exclude.add(d + '.objlist') for t in tgt_list: t.direct_objects = set() t.direct_libs = set() t.direct_syslibs = set() deps = t.samba_deps_extended[:] if getattr(t, 'samba_use_global_deps', False) and not t.sname in global_deps_exclude: deps.extend(global_deps) for d in deps: if d == t.sname: continue if not d in targets: Logs.error("Unknown dependency '%s' in '%s'" % (d, t.sname)) sys.exit(1) if targets[d] in [ 'EMPTY', 'DISABLED' ]: continue if targets[d] == 'PYTHON' and targets[t.sname] != 'PYTHON' and t.sname.find('.objlist') == -1: # this check should be more restrictive, but for now we have pidl-generated python # code that directly depends on other python modules Logs.error('ERROR: Target %s has dependency on python module %s' % (t.sname, d)) sys.exit(1) if targets[d] == 'SYSLIB': t.direct_syslibs.add(d) if d in syslib_deps: for implied in TO_LIST(syslib_deps[d]): if BUILTIN_LIBRARY(bld, implied): t.direct_objects.add(implied) elif targets[implied] == 'SYSLIB': t.direct_syslibs.add(implied) elif targets[implied] in ['LIBRARY', 'MODULE']: t.direct_libs.add(implied) else: Logs.error('Implied dependency %s in %s is of type %s' % ( implied, t.sname, targets[implied])) sys.exit(1) continue t2 = bld.get_tgen_by_name(d) if t2 is None: Logs.error("no task %s of type %s in %s" % (d, targets[d], t.sname)) sys.exit(1) if t2.samba_type in [ 'LIBRARY', 'MODULE' ]: t.direct_libs.add(d) elif t2.samba_type in [ 'SUBSYSTEM', 'ASN1', 'PYTHON' ]: t.direct_objects.add(d) debug('deps: built direct dependencies') def dependency_loop(loops, t, target): '''add a dependency loop to the loops dictionary''' if t.sname == target: return if not target in loops: loops[target] = set() if not t.sname in loops[target]: loops[target].add(t.sname) def indirect_libs(bld, t, chain, loops): '''recursively calculate the indirect library dependencies for a target An indirect library is a library that results from a dependency on a subsystem ''' ret = getattr(t, 'indirect_libs', None) if ret is not None: return ret ret = set() for obj in t.direct_objects: if obj in chain: dependency_loop(loops, t, obj) continue chain.add(obj) t2 = 
bld.get_tgen_by_name(obj) r2 = indirect_libs(bld, t2, chain, loops) chain.remove(obj) ret = ret.union(t2.direct_libs) ret = ret.union(r2) for obj in indirect_objects(bld, t, set(), loops): if obj in chain: dependency_loop(loops, t, obj) continue chain.add(obj) t2 = bld.get_tgen_by_name(obj) r2 = indirect_libs(bld, t2, chain, loops) chain.remove(obj) ret = ret.union(t2.direct_libs) ret = ret.union(r2) t.indirect_libs = ret return ret def indirect_objects(bld, t, chain, loops): '''recursively calculate the indirect object dependencies for a target indirect objects are the set of objects from expanding the subsystem dependencies ''' ret = getattr(t, 'indirect_objects', None) if ret is not None: return ret ret = set() for lib in t.direct_objects: if lib in chain: dependency_loop(loops, t, lib) continue chain.add(lib) t2 = bld.get_tgen_by_name(lib) r2 = indirect_objects(bld, t2, chain, loops) chain.remove(lib) ret = ret.union(t2.direct_objects) ret = ret.union(r2) t.indirect_objects = ret return ret def extended_objects(bld, t, chain): '''recursively calculate the extended object dependencies for a target extended objects are the union of: - direct objects - indirect objects - direct and indirect objects of all direct and indirect libraries ''' ret = getattr(t, 'extended_objects', None) if ret is not None: return ret ret = set() ret = ret.union(t.final_objects) for lib in t.final_libs: if lib in chain: continue t2 = bld.get_tgen_by_name(lib) chain.add(lib) r2 = extended_objects(bld, t2, chain) chain.remove(lib) ret = ret.union(t2.final_objects) ret = ret.union(r2) t.extended_objects = ret return ret def includes_objects(bld, t, chain, inc_loops): '''recursively calculate the includes object dependencies for a target includes dependencies come from either library or object dependencies ''' ret = getattr(t, 'includes_objects', None) if ret is not None: return ret ret = t.direct_objects.copy() ret = ret.union(t.direct_libs) for obj in t.direct_objects: if obj in chain: dependency_loop(inc_loops, t, obj) continue chain.add(obj) t2 = bld.get_tgen_by_name(obj) r2 = includes_objects(bld, t2, chain, inc_loops) chain.remove(obj) ret = ret.union(t2.direct_objects) ret = ret.union(r2) for lib in t.direct_libs: if lib in chain: dependency_loop(inc_loops, t, lib) continue chain.add(lib) t2 = bld.get_tgen_by_name(lib) if t2 is None: targets = LOCAL_CACHE(bld, 'TARGET_TYPE') Logs.error('Target %s of type %s not found in direct_libs for %s' % ( lib, targets[lib], t.sname)) sys.exit(1) r2 = includes_objects(bld, t2, chain, inc_loops) chain.remove(lib) ret = ret.union(t2.direct_objects) ret = ret.union(r2) t.includes_objects = ret return ret def break_dependency_loops(bld, tgt_list): '''find and break dependency loops''' loops = {} inc_loops = {} # build up the list of loops for t in tgt_list: indirect_objects(bld, t, set(), loops) indirect_libs(bld, t, set(), loops) includes_objects(bld, t, set(), inc_loops) # break the loops for t in tgt_list: if t.sname in loops: for attr in ['direct_objects', 'indirect_objects', 'direct_libs', 'indirect_libs']: objs = getattr(t, attr, set()) setattr(t, attr, objs.difference(loops[t.sname])) for loop in loops: debug('deps: Found dependency loops for target %s : %s', loop, loops[loop]) for loop in inc_loops: debug('deps: Found include loops for target %s : %s', loop, inc_loops[loop]) # expand the loops mapping by one level for loop in loops.copy(): for tgt in loops[loop]: if tgt in loops: loops[loop] = loops[loop].union(loops[tgt]) for loop in inc_loops.copy(): for tgt in 
inc_loops[loop]: if tgt in inc_loops: inc_loops[loop] = inc_loops[loop].union(inc_loops[tgt]) # expand indirect subsystem and library loops for loop in loops.copy(): t = bld.get_tgen_by_name(loop) if t.samba_type in ['SUBSYSTEM']: loops[loop] = loops[loop].union(t.indirect_objects) loops[loop] = loops[loop].union(t.direct_objects) if t.samba_type in ['LIBRARY','PYTHON']: loops[loop] = loops[loop].union(t.indirect_libs) loops[loop] = loops[loop].union(t.direct_libs) if loop in loops[loop]: loops[loop].remove(loop) # expand indirect includes loops for loop in inc_loops.copy(): t = bld.get_tgen_by_name(loop) inc_loops[loop] = inc_loops[loop].union(t.includes_objects) if loop in inc_loops[loop]: inc_loops[loop].remove(loop) # add in the replacement dependencies for t in tgt_list: for loop in loops: for attr in ['indirect_objects', 'indirect_libs']: objs = getattr(t, attr, set()) if loop in objs: diff = loops[loop].difference(objs) if t.sname in diff: diff.remove(t.sname) if diff: debug('deps: Expanded target %s of type %s from loop %s by %s', t.sname, t.samba_type, loop, diff) objs = objs.union(diff) setattr(t, attr, objs) for loop in inc_loops: objs = getattr(t, 'includes_objects', set()) if loop in objs: diff = inc_loops[loop].difference(objs) if t.sname in diff: diff.remove(t.sname) if diff: debug('deps: Expanded target %s includes of type %s from loop %s by %s', t.sname, t.samba_type, loop, diff) objs = objs.union(diff) setattr(t, 'includes_objects', objs) def reduce_objects(bld, tgt_list): '''reduce objects by looking for indirect object dependencies''' rely_on = {} for t in tgt_list: t.extended_objects = None changed = False for type in ['BINARY', 'PYTHON', 'LIBRARY']: for t in tgt_list: if t.samba_type != type: continue # if we will indirectly link to a target then we don't need it new = t.final_objects.copy() for l in t.final_libs: t2 = bld.get_tgen_by_name(l) t2_obj = extended_objects(bld, t2, set()) dup = new.intersection(t2_obj) if t.sname in rely_on: dup = dup.difference(rely_on[t.sname]) if dup: # Do not remove duplicates of BUILTINS d = next(iter(dup)) if BUILTIN_LIBRARY(bld, d): continue debug('deps: removing dups from %s of type %s: %s also in %s %s', t.sname, t.samba_type, dup, t2.samba_type, l) new = new.difference(dup) changed = True if not l in rely_on: rely_on[l] = set() rely_on[l] = rely_on[l].union(dup) t.final_objects = new if not changed: return False # add back in any objects that were relied upon by the reduction rules for r in rely_on: t = bld.get_tgen_by_name(r) t.final_objects = t.final_objects.union(rely_on[r]) return True def show_library_loop(bld, lib1, lib2, path, seen): '''show the detailed path of a library loop between lib1 and lib2''' t = bld.get_tgen_by_name(lib1) if not lib2 in getattr(t, 'final_libs', set()): return for d in t.samba_deps_extended: if d in seen: continue seen.add(d) path2 = path + '=>' + d if d == lib2: Logs.warn('library loop path: ' + path2) return show_library_loop(bld, d, lib2, path2, seen) seen.remove(d) def calculate_final_deps(bld, tgt_list, loops): '''calculate the final library and object dependencies''' for t in tgt_list: # start with the maximum possible list t.final_libs = t.direct_libs.union(indirect_libs(bld, t, set(), loops)) t.final_objects = t.direct_objects.union(indirect_objects(bld, t, set(), loops)) for t in tgt_list: # don't depend on ourselves if t.sname in t.final_libs: t.final_libs.remove(t.sname) if t.sname in t.final_objects: t.final_objects.remove(t.sname) # handle any non-shared binaries for t in tgt_list: if 
t.samba_type == 'BINARY' and bld.NONSHARED_BINARY(t.sname): subsystem_list = LOCAL_CACHE(bld, 'INIT_FUNCTIONS') targets = LOCAL_CACHE(bld, 'TARGET_TYPE') # replace lib deps with objlist deps for l in t.final_libs: objname = l + '.objlist' t2 = bld.get_tgen_by_name(objname) if t2 is None: Logs.error('ERROR: subsystem %s not found' % objname) sys.exit(1) t.final_objects.add(objname) t.final_objects = t.final_objects.union(extended_objects(bld, t2, set())) if l in subsystem_list: # its a subsystem - we also need the contents of any modules for d in subsystem_list[l]: module_name = d['TARGET'] if targets[module_name] == 'LIBRARY': objname = module_name + '.objlist' elif targets[module_name] == 'SUBSYSTEM': objname = module_name else: continue t2 = bld.get_tgen_by_name(objname) if t2 is None: Logs.error('ERROR: subsystem %s not found' % objname) sys.exit(1) t.final_objects.add(objname) t.final_objects = t.final_objects.union(extended_objects(bld, t2, set())) t.final_libs = set() # find any library loops for t in tgt_list: if t.samba_type in ['LIBRARY', 'PYTHON']: for l in t.final_libs.copy(): t2 = bld.get_tgen_by_name(l) if t.sname in t2.final_libs: if getattr(bld.env, "ALLOW_CIRCULAR_LIB_DEPENDENCIES", False): # we could break this in either direction. If one of the libraries # has a version number, and will this be distributed publicly, then # we should make it the lower level library in the DAG Logs.warn('deps: removing library loop %s from %s' % (t.sname, t2.sname)) dependency_loop(loops, t, t2.sname) t2.final_libs.remove(t.sname) else: Logs.error('ERROR: circular library dependency between %s and %s' % (t.sname, t2.sname)) show_library_loop(bld, t.sname, t2.sname, t.sname, set()) show_library_loop(bld, t2.sname, t.sname, t2.sname, set()) sys.exit(1) for loop in loops: debug('deps: Found dependency loops for target %s : %s', loop, loops[loop]) # we now need to make corrections for any library loops we broke up # any target that depended on the target of the loop and doesn't # depend on the source of the loop needs to get the loop source added for type in ['BINARY','PYTHON','LIBRARY','BINARY']: for t in tgt_list: if t.samba_type != type: continue for loop in loops: if loop in t.final_libs: diff = loops[loop].difference(t.final_libs) if t.sname in diff: diff.remove(t.sname) if t.sname in diff: diff.remove(t.sname) # make sure we don't recreate the loop again! 
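# --------------------------------------------------------------------
# Illustrative aside (hedged, not part of the original source): the
# guard implemented by the loop that follows, reduced to a standalone
# helper. Expansion candidates are dropped when the candidate library
# still lists the current target in its final_libs, since re-adding it
# would simply recreate the cycle that was just broken.
def _demo_loop_guard(candidates, target_name, final_libs_of):
    return set(d for d in candidates
               if target_name not in final_libs_of.get(d, set()))
# _demo_loop_guard({'libB'}, 'libA', {'libB': {'libA'}}) -> set()
# --------------------------------------------------------------------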
for d in diff.copy(): t2 = bld.get_tgen_by_name(d) if t2.samba_type == 'LIBRARY': if t.sname in t2.final_libs: debug('deps: removing expansion %s from %s', d, t.sname) diff.remove(d) if diff: debug('deps: Expanded target %s by loop %s libraries (loop %s) %s', t.sname, loop, loops[loop], diff) t.final_libs = t.final_libs.union(diff) # remove objects that are also available in linked libs count = 0 while reduce_objects(bld, tgt_list): count += 1 if count > 100: Logs.warn("WARNING: Unable to remove all inter-target object duplicates") break debug('deps: Object reduction took %u iterations', count) # add in any syslib dependencies for t in tgt_list: if not t.samba_type in ['BINARY','PYTHON','LIBRARY','SUBSYSTEM']: continue syslibs = set() for d in t.final_objects: t2 = bld.get_tgen_by_name(d) syslibs = syslibs.union(t2.direct_syslibs) # this adds the indirect syslibs as well, which may not be needed # depending on the linker flags for d in t.final_libs: t2 = bld.get_tgen_by_name(d) syslibs = syslibs.union(t2.direct_syslibs) t.final_syslibs = syslibs # find any unresolved library loops lib_loop_error = False for t in tgt_list: if t.samba_type in ['LIBRARY', 'PYTHON']: for l in t.final_libs.copy(): t2 = bld.get_tgen_by_name(l) if t.sname in t2.final_libs: Logs.error('ERROR: Unresolved library loop %s from %s' % (t.sname, t2.sname)) lib_loop_error = True if lib_loop_error: sys.exit(1) debug('deps: removed duplicate dependencies') def show_dependencies(bld, target, seen): '''recursively show the dependencies of target''' if target in seen: return t = bld.get_tgen_by_name(target) if t is None: Logs.error("ERROR: Unable to find target '%s'" % target) sys.exit(1) Logs.info('%s(OBJECTS): %s' % (target, t.direct_objects)) Logs.info('%s(LIBS): %s' % (target, t.direct_libs)) Logs.info('%s(SYSLIBS): %s' % (target, t.direct_syslibs)) seen.add(target) for t2 in t.direct_objects: show_dependencies(bld, t2, seen) def show_object_duplicates(bld, tgt_list): '''show a list of object files that are included in more than one library or binary''' targets = LOCAL_CACHE(bld, 'TARGET_TYPE') used_by = {} Logs.info("showing duplicate objects") for t in tgt_list: if not targets[t.sname] in [ 'LIBRARY', 'PYTHON' ]: continue for n in getattr(t, 'final_objects', set()): t2 = bld.get_tgen_by_name(n) if not n in used_by: used_by[n] = set() used_by[n].add(t.sname) for n in used_by: if len(used_by[n]) > 1: Logs.info("target '%s' is used by %s" % (n, used_by[n])) Logs.info("showing indirect dependency counts (sorted by count)") def indirect_count(t1, t2): return len(t2.indirect_objects) - len(t1.indirect_objects) sorted_list = sorted(tgt_list, cmp=indirect_count) for t in sorted_list: if len(t.indirect_objects) > 1: Logs.info("%s depends on %u indirect objects" % (t.sname, len(t.indirect_objects))) ###################################################################### # this provides a way to save our dependency calculations between runs savedeps_version = 3 savedeps_inputs = ['samba_deps', 'samba_includes', 'local_include', 'local_include_first', 'samba_cflags', 'source', 'grouping_library', 'samba_ldflags', 'allow_undefined_symbols', 'use_global_deps', 'global_include' ] savedeps_outputs = ['uselib', 'uselib_local', 'add_objects', 'includes', 'cflags', 'ldflags', 'samba_deps_extended', 'final_libs'] savedeps_outenv = ['INC_PATHS'] savedeps_envvars = ['NONSHARED_BINARIES', 'GLOBAL_DEPENDENCIES', 'EXTRA_CFLAGS', 'EXTRA_LDFLAGS', 'EXTRA_INCLUDES' ] savedeps_caches = ['GLOBAL_DEPENDENCIES', 'TARGET_TYPE', 'INIT_FUNCTIONS', 
'SYSLIB_DEPS'] savedeps_files = ['buildtools/wafsamba/samba_deps.py'] def save_samba_deps(bld, tgt_list): '''save the dependency calculations between builds, to make further builds faster''' denv = ConfigSet.ConfigSet() denv.version = savedeps_version denv.savedeps_inputs = savedeps_inputs denv.savedeps_outputs = savedeps_outputs denv.input = {} denv.output = {} denv.outenv = {} denv.caches = {} denv.envvar = {} denv.files = {} for f in savedeps_files: denv.files[f] = os.stat(os.path.join(bld.srcnode.abspath(), f)).st_mtime for c in savedeps_caches: denv.caches[c] = LOCAL_CACHE(bld, c) for e in savedeps_envvars: denv.envvar[e] = bld.env[e] for t in tgt_list: # save all the input attributes for each target tdeps = {} for attr in savedeps_inputs: v = getattr(t, attr, None) if v is not None: tdeps[attr] = v if tdeps != {}: denv.input[t.sname] = tdeps # save all the output attributes for each target tdeps = {} for attr in savedeps_outputs: v = getattr(t, attr, None) if v is not None: tdeps[attr] = v if tdeps != {}: denv.output[t.sname] = tdeps tdeps = {} for attr in savedeps_outenv: if attr in t.env: tdeps[attr] = t.env[attr] if tdeps != {}: denv.outenv[t.sname] = tdeps depsfile = os.path.join(bld.cache_dir, "sambadeps") denv.store_fast(depsfile) def load_samba_deps(bld, tgt_list): '''load a previous set of build dependencies if possible''' depsfile = os.path.join(bld.cache_dir, "sambadeps") denv = ConfigSet.ConfigSet() try: debug('deps: checking saved dependencies') denv.load_fast(depsfile) if (denv.version != savedeps_version or denv.savedeps_inputs != savedeps_inputs or denv.savedeps_outputs != savedeps_outputs): return False except Exception: return False # check if critical files have changed for f in savedeps_files: if f not in denv.files: return False if denv.files[f] != os.stat(os.path.join(bld.srcnode.abspath(), f)).st_mtime: return False # check if caches are the same for c in savedeps_caches: if c not in denv.caches or denv.caches[c] != LOCAL_CACHE(bld, c): return False # check if caches are the same for e in savedeps_envvars: if e not in denv.envvar or denv.envvar[e] != bld.env[e]: return False # check inputs are the same for t in tgt_list: tdeps = {} for attr in savedeps_inputs: v = getattr(t, attr, None) if v is not None: tdeps[attr] = v if t.sname in denv.input: olddeps = denv.input[t.sname] else: olddeps = {} if tdeps != olddeps: #print '%s: \ntdeps=%s \nodeps=%s' % (t.sname, tdeps, olddeps) return False # put outputs in place for t in tgt_list: if not t.sname in denv.output: continue tdeps = denv.output[t.sname] for a in tdeps: setattr(t, a, tdeps[a]) # put output env vars in place for t in tgt_list: if not t.sname in denv.outenv: continue tdeps = denv.outenv[t.sname] for a in tdeps: t.env[a] = tdeps[a] debug('deps: loaded saved dependencies') return True def check_project_rules(bld): '''check the project rules - ensuring the targets are sane''' loops = {} inc_loops = {} tgt_list = get_tgt_list(bld) add_samba_attributes(bld, tgt_list) force_project_rules = (Options.options.SHOWDEPS or Options.options.SHOW_DUPLICATES) if not force_project_rules and load_samba_deps(bld, tgt_list): return timer = Utils.Timer() bld.new_rules = True Logs.info("Checking project rules ...") debug('deps: project rules checking started') expand_subsystem_deps(bld) debug("deps: expand_subsystem_deps: %s" % str(timer)) replace_grouping_libraries(bld, tgt_list) debug("deps: replace_grouping_libraries: %s" % str(timer)) build_direct_deps(bld, tgt_list) debug("deps: build_direct_deps: %s" % str(timer)) 
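# --------------------------------------------------------------------
# Illustrative aside (hedged, not part of the original source): the
# cache-validation idea behind save_samba_deps()/load_samba_deps()
# above, reduced to its core -- recompute everything unless the format
# version, the recorded inputs and the recorded environment all match.
def _demo_cache_valid(saved, version, inputs, envvars):
    if saved.get('version') != version:
        return False
    if saved.get('input') != inputs:
        return False
    return saved.get('envvar') == envvars
# _demo_cache_valid({'version': 3, 'input': {}, 'envvar': {}}, 3, {}, {})
#     -> True
# --------------------------------------------------------------------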
break_dependency_loops(bld, tgt_list) debug("deps: break_dependency_loops: %s" % str(timer)) if Options.options.SHOWDEPS: show_dependencies(bld, Options.options.SHOWDEPS, set()) calculate_final_deps(bld, tgt_list, loops) debug("deps: calculate_final_deps: %s" % str(timer)) if Options.options.SHOW_DUPLICATES: show_object_duplicates(bld, tgt_list) # run the various attribute generators for f in [ build_dependencies, build_includes, add_init_functions ]: debug('deps: project rules checking %s', f) for t in tgt_list: f(t) debug("deps: %s: %s" % (f, str(timer))) debug('deps: project rules stage1 completed') if not check_duplicate_sources(bld, tgt_list): Logs.error("Duplicate sources present - aborting") sys.exit(1) debug("deps: check_duplicate_sources: %s" % str(timer)) if not bld.check_group_ordering(tgt_list): Logs.error("Bad group ordering - aborting") sys.exit(1) debug("deps: check_group_ordering: %s" % str(timer)) show_final_deps(bld, tgt_list) debug("deps: show_final_deps: %s" % str(timer)) debug('deps: project rules checking completed - %u targets checked', len(tgt_list)) if not bld.is_install: save_samba_deps(bld, tgt_list) debug("deps: save_samba_deps: %s" % str(timer)) Logs.info("Project rules pass") def CHECK_PROJECT_RULES(bld): '''enable checking of project targets for sanity''' if bld.env.added_project_rules: return bld.env.added_project_rules = True bld.add_pre_fun(check_project_rules) Build.BuildContext.CHECK_PROJECT_RULES = CHECK_PROJECT_RULES tdb-1.4.2/buildtools/wafsamba/samba_dist.py0000660000000000000000000002124313444661620020654 0ustar rootroot00000000000000# customised version of 'waf dist' for Samba tools # uses git ls-files to get file lists import os, sys, tarfile from waflib import Utils, Scripting, Logs, Options from waflib.Configure import conf from samba_utils import os_path_relpath, get_string from waflib import Context dist_dirs = None dist_files = None dist_blacklist = "" dist_archive = None class Dist(Context.Context): # TODO remove cmd = 'dist' fun = 'dist' def execute(self): Context.g_module.dist() class DistCheck(Scripting.DistCheck): fun = 'distcheck' cmd = 'distcheck' def execute(self): Options.options.distcheck_args = '' if Context.g_module.distcheck is Scripting.distcheck: # default Context.g_module.distcheck(self) else: Context.g_module.distcheck() Context.g_module.dist() self.check() def get_arch_name(self): global dist_archive return dist_archive def make_distcheck_cmd(self, tmpdir): waf = os.path.abspath(sys.argv[0]) return [sys.executable, waf, 'configure', 'build', 'install', 'uninstall', '--destdir=' + tmpdir] def add_symlink(tar, fname, abspath, basedir): '''handle symlinks to directories that may move during packaging''' if not os.path.islink(abspath): return False tinfo = tar.gettarinfo(name=abspath, arcname=fname) tgt = os.readlink(abspath) if dist_dirs: # we need to find the target relative to the main directory # this is here to cope with symlinks into the buildtools # directory from within the standalone libraries in Samba. 
For example, # a symlink to ../../builtools/scripts/autogen-waf.sh needs # to be rewritten as a symlink to buildtools/scripts/autogen-waf.sh # when the tarball for talloc is built # the filename without the appname-version rel_fname = '/'.join(fname.split('/')[1:]) # join this with the symlink target tgt_full = os.path.join(os.path.dirname(rel_fname), tgt) # join with the base directory tgt_base = os.path.normpath(os.path.join(basedir, tgt_full)) # see if this is inside one of our dist_dirs for dir in dist_dirs.split(): if dir.find(':') != -1: destdir=dir.split(':')[1] dir=dir.split(':')[0] else: destdir = '.' if dir == basedir: # internal links don't get rewritten continue if dir == tgt_base[0:len(dir)] and tgt_base[len(dir)] == '/': new_tgt = destdir + tgt_base[len(dir):] tinfo.linkname = new_tgt break tinfo.uid = 0 tinfo.gid = 0 tinfo.uname = 'root' tinfo.gname = 'root' tar.addfile(tinfo) return True def add_tarfile(tar, fname, abspath, basedir): '''add a file to the tarball''' if add_symlink(tar, fname, abspath, basedir): return try: tinfo = tar.gettarinfo(name=abspath, arcname=fname) except OSError: Logs.error('Unable to find file %s - missing from git checkout?' % abspath) sys.exit(1) tinfo.uid = 0 tinfo.gid = 0 tinfo.uname = 'root' tinfo.gname = 'root' fh = open(abspath, "rb") tar.addfile(tinfo, fileobj=fh) fh.close() def vcs_dir_contents(path): """Return the versioned files under a path. :return: List of paths relative to path """ repo = path while repo != "/": if os.path.isdir(os.path.join(repo, ".git")): ls_files_cmd = [ 'git', 'ls-files', '--full-name', os_path_relpath(path, repo) ] cwd = None env = dict(os.environ) env["GIT_DIR"] = os.path.join(repo, ".git") break repo = os.path.dirname(repo) if repo == "/": raise Exception("unsupported or no vcs for %s" % path) return get_string(Utils.cmd_output(ls_files_cmd, cwd=cwd, env=env)).split('\n') def dist(appname='', version=''): def add_files_to_tarball(tar, srcdir, srcsubdir, dstdir, dstsubdir, blacklist, files): if blacklist is None: blacklist = [] for f in files: abspath = os.path.join(srcdir, f) if srcsubdir != '.': f = f[len(srcsubdir)+1:] # Remove files in the blacklist if f in blacklist: continue blacklisted = False # Remove directories in the blacklist for d in blacklist: if f.startswith(d): blacklisted = True if blacklisted: continue if os.path.isdir(abspath) and not os.path.islink(abspath): continue if dstsubdir != '.': f = dstsubdir + '/' + f fname = dstdir + '/' + f add_tarfile(tar, fname, abspath, srcsubdir) def list_directory_files(path): curdir = os.getcwd() os.chdir(srcdir) out_files = [] for root, dirs, files in os.walk(path): for f in files: out_files.append(os.path.join(root, f)) os.chdir(curdir) return out_files if not isinstance(appname, str) or not appname: # this copes with a mismatch in the calling arguments for dist() appname = Context.g_module.APPNAME version = Context.g_module.VERSION if not version: version = Context.g_module.VERSION srcdir = os.path.normpath( os.path.join(os.path.dirname(Context.g_module.root_path), Context.g_module.top)) if not dist_dirs: Logs.error('You must use samba_dist.DIST_DIRS() to set which directories to package') sys.exit(1) dist_base = '%s-%s' % (appname, version) if Options.options.SIGN_RELEASE: dist_name = '%s.tar' % (dist_base) tar = tarfile.open(dist_name, 'w') else: dist_name = '%s.tar.gz' % (dist_base) tar = tarfile.open(dist_name, 'w:gz') blacklist = dist_blacklist.split() for dir in dist_dirs.split(): if dir.find(':') != -1: destdir=dir.split(':')[1] 
dir=dir.split(':')[0] else: destdir = '.' absdir = os.path.join(srcdir, dir) try: files = vcs_dir_contents(absdir) except Exception as e: Logs.error('unable to get contents of %s: %s' % (absdir, e)) sys.exit(1) add_files_to_tarball(tar, srcdir, dir, dist_base, destdir, blacklist, files) if dist_files: for file in dist_files.split(): if file.find(':') != -1: destfile = file.split(':')[1] file = file.split(':')[0] else: destfile = file absfile = os.path.join(srcdir, file) if os.path.isdir(absfile) and not os.path.islink(absfile): destdir = destfile dir = file files = list_directory_files(dir) add_files_to_tarball(tar, srcdir, dir, dist_base, destdir, blacklist, files) else: fname = dist_base + '/' + destfile add_tarfile(tar, fname, absfile, destfile) tar.close() if Options.options.SIGN_RELEASE: import gzip try: os.unlink(dist_name + '.asc') except OSError: pass cmd = "gpg --detach-sign --armor " + dist_name os.system(cmd) uncompressed_tar = open(dist_name, 'rb') compressed_tar = gzip.open(dist_name + '.gz', 'wb') while 1: buffer = uncompressed_tar.read(1048576) if buffer: compressed_tar.write(buffer) else: break uncompressed_tar.close() compressed_tar.close() os.unlink(dist_name) Logs.info('Created %s.gz %s.asc' % (dist_name, dist_name)) dist_name = dist_name + '.gz' else: Logs.info('Created %s' % dist_name) # TODO use the ctx object instead global dist_archive dist_archive = dist_name return dist_name @conf def DIST_DIRS(dirs): '''set the directories to package, relative to top srcdir''' global dist_dirs if not dist_dirs: dist_dirs = dirs @conf def DIST_FILES(files, extend=False): '''set additional files for packaging, relative to top srcdir''' global dist_files if not dist_files: dist_files = files elif extend: dist_files = dist_files + " " + files @conf def DIST_BLACKLIST(blacklist): '''set the files to exclude from packaging, relative to top srcdir''' global dist_blacklist if not dist_blacklist: dist_blacklist = blacklist Scripting.dist = dist tdb-1.4.2/buildtools/wafsamba/samba_git.py0000660000000000000000000000342013444661620020471 0ustar rootroot00000000000000import os import subprocess def find_git(env=None): """Find the git binary.""" if env is not None and 'GIT' in env: return env.get_flat('GIT') # Get version from GIT if os.path.exists("/usr/bin/git"): # this is useful when doing make dist without configuring return "/usr/bin/git" return None def has_submodules(path): """Check whether a source directory is git-versioned and has submodules. :param path: Path to Samba source directory """ return (os.path.isdir(os.path.join(path, ".git")) and os.path.isfile(os.path.join(path, ".gitmodules"))) def read_submodule_status(path, env=None): """Check status of submodules. :param path: Path to git directory :param env: Optional waf environment :return: Yields tuples with submodule relpath and status (one of: 'out-of-date', 'not-checked-out', 'up-to-date') :raise RuntimeError: raised when parsing of 'git submodule status' output fails. """ if not has_submodules(path): # No point in running git. 
return git = find_git(env) if git is None: return p = subprocess.Popen([git, "submodule", "status"], stdout=subprocess.PIPE, cwd=path) (stdout, stderr) = p.communicate(None) for l in stdout.splitlines(): l = l.rstrip() status = l[0] l = l[1:] parts = l.split(" ") if len(parts) > 2 and status in ("-", "+"): yield (parts[1], "out-of-date") elif len(parts) == 2 and status == "-": yield (parts[1], "not-checked-out") elif len(parts) > 2 and status == " ": yield (parts[1], "up-to-date") else: raise RuntimeError("Unable to parse submodule status: %r, %r" % (status, parts)) tdb-1.4.2/buildtools/wafsamba/samba_headers.py0000660000000000000000000001475213444661620021333 0ustar rootroot00000000000000# specialist handling of header files for Samba import os, re, sys, fnmatch from waflib import Build, Logs, Utils, Errors from samba_utils import TO_LIST, os_path_relpath def header_install_path(header, header_path): '''find the installation path for a header, given a header_path option''' if not header_path: return '' if not isinstance(header_path, list): return header_path for (p1, dir) in header_path: for p2 in TO_LIST(p1): if fnmatch.fnmatch(header, p2): return dir # default to current path return '' re_header = re.compile('^\s*#\s*include[ \t]*"([^"]+)"', re.I | re.M) # a dictionary mapping source header paths to public header paths header_map = {} def find_suggested_header(hpath): '''find a suggested header path to use''' base = os.path.basename(hpath) ret = [] for h in header_map: if os.path.basename(h) == base: ret.append('<%s>' % header_map[h]) ret.append('"%s"' % h) return ret def create_public_header(task): '''create a public header from a private one, output within the build tree''' src = task.inputs[0].abspath(task.env) tgt = task.outputs[0].bldpath(task.env) if os.path.exists(tgt): os.unlink(tgt) relsrc = os_path_relpath(src, task.env.TOPDIR) infile = open(src, mode='r') outfile = open(tgt, mode='w') linenumber = 0 search_paths = [ '', task.env.RELPATH ] for i in task.env.EXTRA_INCLUDES: if i.startswith('#'): search_paths.append(i[1:]) for line in infile: linenumber += 1 # allow some straight substitutions if task.env.public_headers_replace and line.strip() in task.env.public_headers_replace: outfile.write(task.env.public_headers_replace[line.strip()] + '\n') continue # see if its an include line m = re_header.match(line) if m is None: outfile.write(line) continue # its an include, get the header path hpath = m.group(1) if hpath.startswith("bin/default/"): hpath = hpath[12:] # some are always allowed if task.env.public_headers_skip and hpath in task.env.public_headers_skip: outfile.write(line) continue # work out the header this refers to found = False for s in search_paths: p = os.path.normpath(os.path.join(s, hpath)) if p in header_map: outfile.write("#include <%s>\n" % header_map[p]) found = True break if found: continue if task.env.public_headers_allow_broken: Logs.warn("Broken public header include '%s' in '%s'" % (hpath, relsrc)) outfile.write(line) continue # try to be nice to the developer by suggesting an alternative suggested = find_suggested_header(hpath) outfile.close() os.unlink(tgt) sys.stderr.write("%s:%u:Error: unable to resolve public header %s (maybe try one of %s)\n" % ( os.path.relpath(src, os.getcwd()), linenumber, hpath, suggested)) raise Errors.WafError("Unable to resolve header path '%s' in public header '%s' in directory %s" % ( hpath, relsrc, task.env.RELPATH)) infile.close() outfile.close() def public_headers_simple(bld, public_headers, header_path=None, 
public_headers_install=True): '''install some headers - simple version, no munging needed ''' if not public_headers_install: return for h in TO_LIST(public_headers): inst_path = header_install_path(h, header_path) if h.find(':') != -1: s = h.split(":") h_name = s[0] inst_name = s[1] else: h_name = h inst_name = os.path.basename(h) bld.INSTALL_FILES('${INCLUDEDIR}', h_name, destname=inst_name) def PUBLIC_HEADERS(bld, public_headers, header_path=None, public_headers_install=True): '''install some headers header_path may either be a string that is added to the INCLUDEDIR, or it can be a dictionary of wildcard patterns which map to destination directories relative to INCLUDEDIR ''' bld.SET_BUILD_GROUP('final') if not bld.env.build_public_headers: # in this case no header munging neeeded. Used for tdb, talloc etc public_headers_simple(bld, public_headers, header_path=header_path, public_headers_install=public_headers_install) return # create the public header in the given path # in the build tree for h in TO_LIST(public_headers): inst_path = header_install_path(h, header_path) if h.find(':') != -1: s = h.split(":") h_name = s[0] inst_name = s[1] else: h_name = h inst_name = os.path.basename(h) curdir = bld.path.abspath() relpath1 = os_path_relpath(bld.srcnode.abspath(), curdir) relpath2 = os_path_relpath(curdir, bld.srcnode.abspath()) targetdir = os.path.normpath(os.path.join(relpath1, bld.env.build_public_headers, inst_path)) if not os.path.exists(os.path.join(curdir, targetdir)): raise Errors.WafError("missing source directory %s for public header %s" % (targetdir, inst_name)) target = os.path.join(targetdir, inst_name) # the source path of the header, relative to the top of the source tree src_path = os.path.normpath(os.path.join(relpath2, h_name)) # the install path of the header, relative to the public include directory target_path = os.path.normpath(os.path.join(inst_path, inst_name)) header_map[src_path] = target_path t = bld.SAMBA_GENERATOR('HEADER_%s/%s/%s' % (relpath2, inst_path, inst_name), group='headers', rule=create_public_header, source=h_name, target=target) t.env.RELPATH = relpath2 t.env.TOPDIR = bld.srcnode.abspath() if not bld.env.public_headers_list: bld.env.public_headers_list = [] bld.env.public_headers_list.append(os.path.join(inst_path, inst_name)) if public_headers_install: bld.INSTALL_FILES('${INCLUDEDIR}', target, destname=os.path.join(inst_path, inst_name), flat=True) Build.BuildContext.PUBLIC_HEADERS = PUBLIC_HEADERS tdb-1.4.2/buildtools/wafsamba/samba_install.py0000660000000000000000000002037513444661620021364 0ustar rootroot00000000000000########################### # this handles the magic we need to do for installing # with all the configure options that affect rpath and shared # library use import os from waflib import Utils, Errors from waflib.TaskGen import feature, before, after from samba_utils import LIB_PATH, MODE_755, install_rpath, build_rpath @feature('install_bin') @after('apply_core') @before('apply_link', 'apply_obj_vars') def install_binary(self): '''install a binary, taking account of the different rpath varients''' bld = self.bld # get the ldflags we will use for install and build install_ldflags = install_rpath(self) build_ldflags = build_rpath(bld) if not self.bld.is_install: # just need to set rpath if we are not installing self.env.RPATH = build_ldflags return # work out the install path, expanding variables install_path = getattr(self, 'samba_inst_path', None) or '${BINDIR}' install_path = bld.EXPAND_VARIABLES(install_path) orig_target = 
os.path.basename(self.target) if install_ldflags != build_ldflags: # we will be creating a new target name, and using that for the # install link. That stops us from overwriting the existing build # target, which has different ldflags self.target += '.inst' # setup the right rpath link flags for the install self.env.RPATH = install_ldflags if not self.samba_install: # this binary is marked not to be installed return # tell waf to install the right binary bld.install_as(os.path.join(install_path, orig_target), self.path.find_or_declare(self.target), chmod=MODE_755) @feature('install_lib') @after('apply_core') @before('apply_link', 'apply_obj_vars') def install_library(self): '''install a library, taking account of the different rpath varients''' if getattr(self, 'done_install_library', False): return bld = self.bld default_env = bld.all_envs['default'] try: install_ldflags = install_rpath(self) build_ldflags = build_rpath(bld) if not self.bld.is_install or not getattr(self, 'samba_install', True): # just need to set the build rpath if we are not installing self.env.RPATH = build_ldflags return # setup the install path, expanding variables install_path = getattr(self, 'samba_inst_path', None) if install_path is None: if getattr(self, 'private_library', False): install_path = '${PRIVATELIBDIR}' else: install_path = '${LIBDIR}' install_path = bld.EXPAND_VARIABLES(install_path) target_name = self.target if install_ldflags != build_ldflags: # we will be creating a new target name, and using that for the # install link. That stops us from overwriting the existing build # target, which has different ldflags self.done_install_library = True t = self.clone(self.env) t.posted = False t.target += '.inst' t.name = self.name + '.inst' self.env.RPATH = build_ldflags else: t = self t.env.RPATH = install_ldflags dev_link = None # in the following the names are: # - inst_name is the name with .inst. 
in it, in the build # directory # - install_name is the name in the install directory # - install_link is a symlink in the install directory, to install_name if getattr(self, 'samba_realname', None): install_name = self.samba_realname install_link = None if getattr(self, 'soname', ''): install_link = self.soname if getattr(self, 'samba_type', None) == 'PYTHON': inst_name = bld.make_libname(t.target, nolibprefix=True, python=True) else: inst_name = bld.make_libname(t.target) elif self.vnum: vnum_base = self.vnum.split('.')[0] install_name = bld.make_libname(target_name, version=self.vnum) install_link = bld.make_libname(target_name, version=vnum_base) inst_name = bld.make_libname(t.target) if not self.private_library or not t.env.SONAME_ST: # only generate the dev link for non-bundled libs dev_link = bld.make_libname(target_name) elif getattr(self, 'soname', ''): install_name = bld.make_libname(target_name) install_link = self.soname inst_name = bld.make_libname(t.target) else: install_name = bld.make_libname(target_name) install_link = None inst_name = bld.make_libname(t.target) if t.env.SONAME_ST: # ensure we get the right names in the library if install_link: t.env.append_value('LINKFLAGS', t.env.SONAME_ST % install_link) else: t.env.append_value('LINKFLAGS', t.env.SONAME_ST % install_name) t.env.SONAME_ST = '' # tell waf to install the library bld.install_as(os.path.join(install_path, install_name), self.path.find_or_declare(inst_name), chmod=MODE_755) if install_link and install_link != install_name: # and the symlink if needed bld.symlink_as(os.path.join(install_path, install_link), os.path.basename(install_name)) if dev_link: bld.symlink_as(os.path.join(install_path, dev_link), os.path.basename(install_name)) finally: bld.all_envs['default'] = default_env @feature('cshlib') @after('apply_implib') @before('apply_vnum') def apply_soname(self): '''install a library, taking account of the different rpath varients''' if self.env.SONAME_ST and getattr(self, 'soname', ''): self.env.append_value('LINKFLAGS', self.env.SONAME_ST % self.soname) self.env.SONAME_ST = '' @feature('cshlib') @after('apply_implib') @before('apply_vnum') def apply_vscript(self): '''add version-script arguments to library build''' if self.env.HAVE_LD_VERSION_SCRIPT and getattr(self, 'version_script', ''): self.env.append_value('LINKFLAGS', "-Wl,--version-script=%s" % self.version_script) self.version_script = None ############################## # handle the creation of links for libraries and binaries in the build tree @feature('symlink_lib') @after('apply_link') def symlink_lib(self): '''symlink a shared lib''' if self.target.endswith('.inst'): return blddir = os.path.dirname(self.bld.srcnode.abspath(self.bld.env)) libpath = self.link_task.outputs[0].abspath(self.env) # calculat the link target and put it in the environment soext="" vnum = getattr(self, 'vnum', None) if vnum is not None: soext = '.' 
+ vnum.split('.')[0] link_target = getattr(self, 'link_name', '') if link_target == '': basename = os.path.basename(self.bld.make_libname(self.target, version=soext)) if getattr(self, "private_library", False): link_target = '%s/private/%s' % (LIB_PATH, basename) else: link_target = '%s/%s' % (LIB_PATH, basename) link_target = os.path.join(blddir, link_target) if os.path.lexists(link_target): if os.path.islink(link_target) and os.readlink(link_target) == libpath: return os.unlink(link_target) link_container = os.path.dirname(link_target) if not os.path.isdir(link_container): os.makedirs(link_container) os.symlink(libpath, link_target) @feature('symlink_bin') @after('apply_link') def symlink_bin(self): '''symlink a binary into the build directory''' if self.target.endswith('.inst'): return if not self.link_task.outputs or not self.link_task.outputs[0]: raise Errors.WafError('no outputs found for %s in symlink_bin' % self.name) binpath = self.link_task.outputs[0].abspath(self.env) bldpath = os.path.join(self.bld.env.BUILD_DIRECTORY, self.link_task.outputs[0].name) if os.path.lexists(bldpath): if os.path.islink(bldpath) and os.readlink(bldpath) == binpath: return os.unlink(bldpath) os.symlink(binpath, bldpath) tdb-1.4.2/buildtools/wafsamba/samba_patterns.py0000660000000000000000000002307513444661620021556 0ustar rootroot00000000000000# a waf tool to add extension based build patterns for Samba import sys from waflib import Build from wafsamba import samba_version_file def write_version_header(task): '''print version.h contents''' src = task.inputs[0].srcpath(task.env) version = samba_version_file(src, task.env.srcdir, env=task.env, is_install=task.generator.bld.is_install) string = str(version) task.outputs[0].write(string) return 0 def SAMBA_MKVERSION(bld, target, source='VERSION'): '''generate the version.h header for Samba''' # We only force waf to re-generate this file if we are installing, # because only then is information not included in the deps (the # git revision) included in the version. t = bld.SAMBA_GENERATOR('VERSION', rule=write_version_header, source=source, target=target, always=bld.is_install) Build.BuildContext.SAMBA_MKVERSION = SAMBA_MKVERSION def write_build_options_header(fp): '''write preamble for build_options.c''' fp.write("/*\n") fp.write(" Unix SMB/CIFS implementation.\n") fp.write(" Build Options for Samba Suite\n") fp.write(" Copyright (C) Vance Lankhaar 2003\n") fp.write(" Copyright (C) Andrew Bartlett 2001\n") fp.write("\n") fp.write(" This program is free software; you can redistribute it and/or modify\n") fp.write(" it under the terms of the GNU General Public License as published by\n") fp.write(" the Free Software Foundation; either version 3 of the License, or\n") fp.write(" (at your option) any later version.\n") fp.write("\n") fp.write(" This program is distributed in the hope that it will be useful,\n") fp.write(" but WITHOUT ANY WARRANTY; without even the implied warranty of\n") fp.write(" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n") fp.write(" GNU General Public License for more details.\n") fp.write("\n") fp.write(" You should have received a copy of the GNU General Public License\n") fp.write(" along with this program; if not, see .\n") fp.write("*/\n") fp.write("\n") fp.write("#include \"includes.h\"\n") fp.write("#include \"dynconfig/dynconfig.h\"\n") fp.write("#include \"lib/cluster_support.h\"\n") fp.write("\n") fp.write("static int output(bool screen, const char *format, ...) 
PRINTF_ATTRIBUTE(2,3);\n") fp.write("void build_options(bool screen);\n") fp.write("\n") fp.write("\n") fp.write("/****************************************************************************\n") fp.write("helper function for build_options\n") fp.write("****************************************************************************/\n") fp.write("static int output(bool screen, const char *format, ...)\n") fp.write("{\n") fp.write(" char *ptr = NULL;\n") fp.write(" int ret = 0;\n") fp.write(" va_list ap;\n") fp.write(" \n") fp.write(" va_start(ap, format);\n") fp.write(" ret = vasprintf(&ptr,format,ap);\n") fp.write(" va_end(ap);\n") fp.write("\n") fp.write(" if (screen) {\n") fp.write(" d_printf(\"%s\", ptr ? ptr : \"\");\n") fp.write(" } else {\n") fp.write(" DEBUG(4,(\"%s\", ptr ? ptr : \"\"));\n") fp.write(" }\n") fp.write(" \n") fp.write(" SAFE_FREE(ptr);\n") fp.write(" return ret;\n") fp.write("}\n") fp.write("\n") fp.write("/****************************************************************************\n") fp.write("options set at build time for the samba suite\n") fp.write("****************************************************************************/\n") fp.write("void build_options(bool screen)\n") fp.write("{\n") fp.write(" if ((DEBUGLEVEL < 4) && (!screen)) {\n") fp.write(" return;\n") fp.write(" }\n") fp.write("\n") fp.write("\n") fp.write(" /* Output various paths to files and directories */\n") fp.write(" output(screen,\"\\nPaths:\\n\");\n") fp.write(" output(screen,\" SBINDIR: %s\\n\", get_dyn_SBINDIR());\n") fp.write(" output(screen,\" BINDIR: %s\\n\", get_dyn_BINDIR());\n") fp.write(" output(screen,\" CONFIGFILE: %s\\n\", get_dyn_CONFIGFILE());\n") fp.write(" output(screen,\" LOGFILEBASE: %s\\n\", get_dyn_LOGFILEBASE());\n") fp.write(" output(screen,\" LMHOSTSFILE: %s\\n\",get_dyn_LMHOSTSFILE());\n") fp.write(" output(screen,\" LIBDIR: %s\\n\",get_dyn_LIBDIR());\n") fp.write(" output(screen,\" MODULESDIR: %s\\n\",get_dyn_MODULESDIR());\n") fp.write(" output(screen,\" SHLIBEXT: %s\\n\",get_dyn_SHLIBEXT());\n") fp.write(" output(screen,\" LOCKDIR: %s\\n\",get_dyn_LOCKDIR());\n") fp.write(" output(screen,\" STATEDIR: %s\\n\",get_dyn_STATEDIR());\n") fp.write(" output(screen,\" CACHEDIR: %s\\n\",get_dyn_CACHEDIR());\n") fp.write(" output(screen,\" PIDDIR: %s\\n\", get_dyn_PIDDIR());\n") fp.write(" output(screen,\" SMB_PASSWD_FILE: %s\\n\",get_dyn_SMB_PASSWD_FILE());\n") fp.write(" output(screen,\" PRIVATE_DIR: %s\\n\",get_dyn_PRIVATE_DIR());\n") fp.write(" output(screen,\" BINDDNS_DIR: %s\\n\",get_dyn_BINDDNS_DIR());\n") fp.write("\n") def write_build_options_footer(fp): fp.write(" /* Output the sizes of the various cluster features */\n") fp.write(" output(screen, \"\\n%s\", cluster_support_features());\n") fp.write("\n") fp.write(" /* Output the sizes of the various types */\n") fp.write(" output(screen, \"\\nType sizes:\\n\");\n") fp.write(" output(screen, \" sizeof(char): %lu\\n\",(unsigned long)sizeof(char));\n") fp.write(" output(screen, \" sizeof(int): %lu\\n\",(unsigned long)sizeof(int));\n") fp.write(" output(screen, \" sizeof(long): %lu\\n\",(unsigned long)sizeof(long));\n") fp.write(" output(screen, \" sizeof(long long): %lu\\n\",(unsigned long)sizeof(long long));\n") fp.write(" output(screen, \" sizeof(uint8_t): %lu\\n\",(unsigned long)sizeof(uint8_t));\n") fp.write(" output(screen, \" sizeof(uint16_t): %lu\\n\",(unsigned long)sizeof(uint16_t));\n") fp.write(" output(screen, \" sizeof(uint32_t): %lu\\n\",(unsigned long)sizeof(uint32_t));\n") fp.write(" output(screen, \" 
sizeof(short): %lu\\n\",(unsigned long)sizeof(short));\n") fp.write(" output(screen, \" sizeof(void*): %lu\\n\",(unsigned long)sizeof(void*));\n") fp.write(" output(screen, \" sizeof(size_t): %lu\\n\",(unsigned long)sizeof(size_t));\n") fp.write(" output(screen, \" sizeof(off_t): %lu\\n\",(unsigned long)sizeof(off_t));\n") fp.write(" output(screen, \" sizeof(ino_t): %lu\\n\",(unsigned long)sizeof(ino_t));\n") fp.write(" output(screen, \" sizeof(dev_t): %lu\\n\",(unsigned long)sizeof(dev_t));\n") fp.write("\n") fp.write(" output(screen, \"\\nBuiltin modules:\\n\");\n") fp.write(" output(screen, \" %s\\n\", STRING_STATIC_MODULES);\n") fp.write("}\n") def write_build_options_section(fp, keys, section): fp.write("\n\t/* Show %s */\n" % section) fp.write(" output(screen, \"\\n%s:\\n\");\n\n" % section) for k in sorted(keys): fp.write("#ifdef %s\n" % k) fp.write(" output(screen, \" %s\\n\");\n" % k) fp.write("#endif\n") fp.write("\n") def write_build_options(task): tbl = task.env keys_option_with = [] keys_option_utmp = [] keys_option_have = [] keys_header_sys = [] keys_header_other = [] keys_misc = [] if sys.hexversion>0x300000f: trans_table = bytes.maketrans(b'.-()', b'____') else: import string trans_table = string.maketrans('.-()', '____') for key in tbl: if key.startswith("HAVE_UT_UT_") or key.find("UTMP") >= 0: keys_option_utmp.append(key) elif key.startswith("WITH_"): keys_option_with.append(key) elif key.startswith("HAVE_SYS_"): keys_header_sys.append(key) elif key.startswith("HAVE_"): if key.endswith("_H"): keys_header_other.append(key) else: keys_option_have.append(key) elif key.startswith("static_init_"): l = key.split("(") keys_misc.append(l[0]) else: keys_misc.append(key.translate(trans_table)) tgt = task.outputs[0].bldpath(task.env) f = open(tgt, 'w') write_build_options_header(f) write_build_options_section(f, keys_header_sys, "System Headers") write_build_options_section(f, keys_header_other, "Headers") write_build_options_section(f, keys_option_utmp, "UTMP Options") write_build_options_section(f, keys_option_have, "HAVE_* Defines") write_build_options_section(f, keys_option_with, "--with Options") write_build_options_section(f, keys_misc, "Build Options") write_build_options_footer(f) f.close() return 0 def SAMBA_BLDOPTIONS(bld, target): '''generate the bld_options.c for Samba''' t = bld.SAMBA_GENERATOR(target, rule=write_build_options, dep_vars=['defines'], target=target) Build.BuildContext.SAMBA_BLDOPTIONS = SAMBA_BLDOPTIONS tdb-1.4.2/buildtools/wafsamba/samba_perl.py0000660000000000000000000000410313444661620020647 0ustar rootroot00000000000000from waflib import Utils from waflib.Configure import conf from samba_utils import get_string done = {} @conf def SAMBA_CHECK_PERL(conf, mandatory=True, version=(5,0,0)): if "done" in done: return done["done"] = True conf.find_program('perl', var='PERL', mandatory=mandatory) conf.load('perl') path_perl = conf.find_program('perl') conf.env.PERL_SPECIFIED = (conf.env.PERL != path_perl) conf.check_perl_version(version) def read_perl_config_var(cmd): output = Utils.cmd_output([conf.env.get_flat('PERL'), '-MConfig', '-e', cmd]) if not isinstance(output, str): output = get_string(output) return Utils.to_list(output) def check_perl_config_var(var): conf.start_msg("Checking for perl $Config{%s}:" % var) try: v = read_perl_config_var('print $Config{%s}' % var)[0] conf.end_msg("'%s'" % (v), 'GREEN') return v except IndexError: conf.end_msg(False, 'YELLOW') pass return None vendor_prefix = check_perl_config_var('vendorprefix') 
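# --------------------------------------------------------------------
# Illustrative aside (hedged, not part of the original source): what
# read_perl_config_var() above boils down to, using plain subprocess
# instead of waf's Utils.cmd_output; assumes a perl binary on PATH.
import subprocess
def _demo_perl_config(key, perl='perl'):
    out = subprocess.check_output(
        [perl, '-MConfig', '-e', 'print $Config{%s}' % key])
    return out.decode('utf-8').split()
# e.g. _demo_perl_config('vendorprefix') might return ['/usr']
# --------------------------------------------------------------------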
perl_arch_install_dir = None if vendor_prefix == conf.env.PREFIX: perl_arch_install_dir = check_perl_config_var('vendorarch'); if perl_arch_install_dir is None: perl_arch_install_dir = "${LIBDIR}/perl5"; conf.start_msg("PERL_ARCH_INSTALL_DIR: ") conf.end_msg("'%s'" % (perl_arch_install_dir), 'GREEN') conf.env.PERL_ARCH_INSTALL_DIR = perl_arch_install_dir perl_lib_install_dir = None if vendor_prefix == conf.env.PREFIX: perl_lib_install_dir = check_perl_config_var('vendorlib'); if perl_lib_install_dir is None: perl_lib_install_dir = "${DATADIR}/perl5"; conf.start_msg("PERL_LIB_INSTALL_DIR: ") conf.end_msg("'%s'" % (perl_lib_install_dir), 'GREEN') conf.env.PERL_LIB_INSTALL_DIR = perl_lib_install_dir perl_inc = read_perl_config_var('print "@INC"') if '.' in perl_inc: perl_inc.remove('.') conf.start_msg("PERL_INC: ") conf.end_msg("%s" % (perl_inc), 'GREEN') conf.env.PERL_INC = perl_inc tdb-1.4.2/buildtools/wafsamba/samba_pidl.py0000660000000000000000000001303413444661620020640 0ustar rootroot00000000000000# waf build tool for building IDL files with pidl import os from waflib import Build, Utils from waflib.TaskGen import feature, before from samba_utils import SET_TARGET_TYPE, TO_LIST, LOCAL_CACHE def SAMBA_PIDL(bld, pname, source, options='', output_dir='.', generate_tables=True): '''Build a IDL file using pidl. This will produce up to 13 output files depending on the options used''' bname = source[0:-4]; # strip off the .idl suffix bname = os.path.basename(bname) name = "%s_%s" % (pname, bname.upper()) if not SET_TARGET_TYPE(bld, name, 'PIDL'): return bld.SET_BUILD_GROUP('build_source') # the output files depend on the options used. Use this dictionary # to map between the options and the resulting file names options_map = { '--header' : '%s.h', '--ndr-parser' : 'ndr_%s.c ndr_%s.h', '--samba3-ndr-server' : 'srv_%s.c srv_%s.h', '--samba3-ndr-client' : 'cli_%s.c cli_%s.h', '--server' : 'ndr_%s_s.c', '--client' : 'ndr_%s_c.c ndr_%s_c.h', '--python' : 'py_%s.c', '--tdr-parser' : 'tdr_%s.c tdr_%s.h', '--dcom-proxy' : '%s_p.c', '--com-header' : 'com_%s.h' } table_header_idx = None out_files = [] options_list = TO_LIST(options) for o in options_list: if o in options_map: ofiles = TO_LIST(options_map[o]) for f in ofiles: out_files.append(os.path.join(output_dir, f % bname)) if f == 'ndr_%s.h': # remember this one for the tables generation table_header_idx = len(out_files) - 1 # depend on the full pidl sources source = TO_LIST(source) try: pidl_src_nodes = bld.pidl_files_cache except AttributeError: bld.pidl_files_cache = bld.srcnode.ant_glob('pidl/lib/Parse/**/*.pm', flat=False) bld.pidl_files_cache.extend(bld.srcnode.ant_glob('pidl', flat=False)) pidl_src_nodes = bld.pidl_files_cache # the cd .. 
is needed because pidl currently is sensitive to the directory it is run in cpp = "" cc = "" if bld.CONFIG_SET("CPP") and bld.CONFIG_GET("CPP") != "": if isinstance(bld.CONFIG_GET("CPP"), list): cpp = 'CPP="%s"' % " ".join(bld.CONFIG_GET("CPP")) else: cpp = 'CPP="%s"' % bld.CONFIG_GET("CPP") if cpp == "CPP=xlc_r": cpp = "" if bld.CONFIG_SET("CC"): if isinstance(bld.CONFIG_GET("CC"), list): cc = 'CC="%s"' % " ".join(bld.CONFIG_GET("CC")) else: cc = 'CC="%s"' % bld.CONFIG_GET("CC") t = bld(rule='cd ${PIDL_LAUNCH_DIR} && %s %s ${PERL} ${PIDL} --quiet ${OPTIONS} --outputdir ${OUTPUTDIR} -- "${IDLSRC}"' % (cpp, cc), ext_out = '.c', before = 'c', update_outputs = True, shell = True, source = source, target = out_files, name = name, samba_type = 'PIDL') t.env.PIDL_LAUNCH_DIR = bld.srcnode.path_from(bld.bldnode) pnode = bld.srcnode.find_resource('pidl/pidl') t.env.PIDL = pnode.path_from(bld.srcnode) t.env.OPTIONS = TO_LIST(options) snode = t.path.find_resource(source[0]) t.env.IDLSRC = snode.path_from(bld.srcnode) t.env.OUTPUTDIR = bld.bldnode.path_from(bld.srcnode) + '/' + bld.path.find_dir(output_dir).path_from(bld.srcnode) bld.add_manual_dependency(snode, pidl_src_nodes) if generate_tables and table_header_idx is not None: pidl_headers = LOCAL_CACHE(bld, 'PIDL_HEADERS') pidl_headers[name] = [bld.path.find_or_declare(out_files[table_header_idx])] t.more_includes = '#' + bld.path.path_from(bld.srcnode) Build.BuildContext.SAMBA_PIDL = SAMBA_PIDL def SAMBA_PIDL_LIST(bld, name, source, options='', output_dir='.', generate_tables=True): '''A wrapper for building a set of IDL files''' for p in TO_LIST(source): bld.SAMBA_PIDL(name, p, options=options, output_dir=output_dir, generate_tables=generate_tables) Build.BuildContext.SAMBA_PIDL_LIST = SAMBA_PIDL_LIST ################################################################# # the rule for generating the NDR tables @feature('collect') @before('exec_rule') def collect(self): pidl_headers = LOCAL_CACHE(self.bld, 'PIDL_HEADERS') # The first source is tables.pl itself self.source = Utils.to_list(self.source) for (name, hd) in pidl_headers.items(): y = self.bld.get_tgen_by_name(name) self.bld.ASSERT(y is not None, 'Failed to find PIDL header %s' % name) y.post() for node in hd: self.bld.ASSERT(node is not None, 'Got None as build node generating PIDL table for %s' % name) self.source.append(node) def SAMBA_PIDL_TABLES(bld, name, target): '''generate the pidl NDR tables file''' bld.SET_BUILD_GROUP('main') t = bld( features = 'collect', rule = '${PERL} ${SRC} > ${TGT}', ext_out = '.c', before = 'c', update_outputs = True, shell = True, source = '../../librpc/tables.pl', target = target, name = name) t.env.LIBRPC = os.path.join(bld.srcnode.abspath(), 'librpc') Build.BuildContext.SAMBA_PIDL_TABLES = SAMBA_PIDL_TABLES tdb-1.4.2/buildtools/wafsamba/samba_python.py0000660000000000000000000001206713444661620021236 0ustar rootroot00000000000000# waf build tool for building IDL files with pidl import os, sys from waflib import Build, Logs, Utils, Configure, Errors from waflib.Configure import conf @conf def SAMBA_CHECK_PYTHON(conf, version=(3,4,0)): if conf.env.disable_python: version=(2,6,0) # enable tool to build python extensions if conf.env.HAVE_PYTHON_H: conf.check_python_version(version) return interpreters = [] conf.find_program('python3', var='PYTHON', mandatory=not conf.env.disable_python) conf.load('python') path_python = conf.find_program('python3') conf.env.PYTHON_SPECIFIED = (conf.env.PYTHON != path_python) conf.check_python_version(version) 
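# --------------------------------------------------------------------
# Illustrative aside (hedged, not part of the original source): the
# interpreter probe performed above, reduced to plain Python --
# shutil.which stands in for conf.find_program, and the tuple compare
# stands in for conf.check_python_version.
import shutil, subprocess
def _demo_find_python(minver=(3, 4, 0), exe='python3'):
    path = shutil.which(exe)
    if path is None:
        return None
    out = subprocess.check_output(
        [path, '-c', 'import sys; print("%d.%d.%d" % sys.version_info[:3])'])
    ver = tuple(int(x) for x in out.decode().strip().split('.'))
    return path if ver >= minver else None
# --------------------------------------------------------------------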
interpreters.append(conf.env['PYTHON']) conf.env.python_interpreters = interpreters @conf def SAMBA_CHECK_PYTHON_HEADERS(conf): if conf.env.disable_python: conf.msg("python headers", "Check disabled due to --disable-python") # we don't want PYTHONDIR in config.h, as otherwise changing # --prefix causes a complete rebuild conf.env.DEFINES = [x for x in conf.env.DEFINES if not x.startswith('PYTHONDIR=') and not x.startswith('PYTHONARCHDIR=')] return if conf.env["python_headers_checked"] == []: _check_python_headers(conf) conf.env["python_headers_checked"] = "yes" else: conf.msg("python headers", "using cache") # we don't want PYTHONDIR in config.h, as otherwise changing # --prefix causes a complete rebuild conf.env.DEFINES = [x for x in conf.env.DEFINES if not x.startswith('PYTHONDIR=') and not x.startswith('PYTHONARCHDIR=')] def _check_python_headers(conf): conf.check_python_headers() if conf.env['PYTHON_VERSION'] > '3': abi_pattern = os.path.splitext(conf.env['pyext_PATTERN'])[0] conf.env['PYTHON_SO_ABI_FLAG'] = abi_pattern % '' else: conf.env['PYTHON_SO_ABI_FLAG'] = '' conf.env['PYTHON_LIBNAME_SO_ABI_FLAG'] = ( conf.env['PYTHON_SO_ABI_FLAG'].replace('_', '-')) for lib in conf.env['LINKFLAGS_PYEMBED']: if lib.startswith('-L'): conf.env.append_unique('LIBPATH_PYEMBED', lib[2:]) # strip '-L' conf.env['LINKFLAGS_PYEMBED'].remove(lib) # same as in waf 1.5, keep only '-fno-strict-aliasing' # and ignore defines such as NDEBUG _FORTIFY_SOURCE=2 conf.env.DEFINES_PYEXT = [] conf.env.CFLAGS_PYEXT = ['-fno-strict-aliasing'] return def PYTHON_BUILD_IS_ENABLED(self): return self.CONFIG_SET('HAVE_PYTHON_H') Build.BuildContext.PYTHON_BUILD_IS_ENABLED = PYTHON_BUILD_IS_ENABLED def SAMBA_PYTHON(bld, name, source='', deps='', public_deps='', realname=None, cflags='', cflags_end=None, includes='', init_function_sentinel=None, local_include=True, vars=None, install=True, enabled=True): '''build a python extension for Samba''' # force-disable when we can't build python modules, so # every single call doesn't need to pass this in. if not bld.PYTHON_BUILD_IS_ENABLED(): enabled = False # when we support static python modules we'll need to gather # the list from all the SAMBA_PYTHON() targets if init_function_sentinel is not None: cflags += ' -DSTATIC_LIBPYTHON_MODULES=%s' % init_function_sentinel # From https://docs.python.org/2/c-api/arg.html: # Starting with Python 2.5 the type of the length argument to # PyArg_ParseTuple(), PyArg_ParseTupleAndKeywords() and PyArg_Parse() # can be controlled by defining the macro PY_SSIZE_T_CLEAN before # including Python.h. If the macro is defined, length is a Py_ssize_t # rather than an int. 
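# Illustrative sketch (not part of the original wafsamba source): how
# _check_python_headers above derives PYTHON_SO_ABI_FLAG from waf's
# pyext_PATTERN.  The pattern below is a typical CPython value, assumed
# purely for illustration.
import os

_demo_pyext_pattern = '%s.cpython-36m-x86_64-linux-gnu.so'
_demo_abi_flag = os.path.splitext(_demo_pyext_pattern)[0] % ''
# _demo_abi_flag == '.cpython-36m-x86_64-linux-gnu'
# and the libname variant replaces '_' with '-':
# '.cpython-36m-x86-64-linux-gnu'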
# Because it is often included before includes.h/config.h # This must be in the -D compiler options cflags += ' -DPY_SSIZE_T_CLEAN=1' source = bld.EXPAND_VARIABLES(source, vars=vars) if realname is not None: link_name = 'python/%s' % realname else: link_name = None bld.SAMBA_LIBRARY(name, source=source, deps=deps, public_deps=public_deps, includes=includes, cflags=cflags, cflags_end=cflags_end, local_include=local_include, vars=vars, realname=realname, link_name=link_name, pyext=True, target_type='PYTHON', install_path='${PYTHONARCHDIR}', allow_undefined_symbols=True, install=install, enabled=enabled) Build.BuildContext.SAMBA_PYTHON = SAMBA_PYTHON def pyembed_libname(bld, name): if bld.env['PYTHON_SO_ABI_FLAG']: return name + bld.env['PYTHON_SO_ABI_FLAG'] else: return name Build.BuildContext.pyembed_libname = pyembed_libname tdb-1.4.2/buildtools/wafsamba/samba_third_party.py0000660000000000000000000000273613527011454022244 0ustar rootroot00000000000000# functions to support third party libraries import os from waflib import Utils, Build, Context from waflib.Configure import conf @conf def CHECK_FOR_THIRD_PARTY(conf): return os.path.exists(os.path.join(Context.g_module.top, 'third_party')) Build.BuildContext.CHECK_FOR_THIRD_PARTY = CHECK_FOR_THIRD_PARTY @conf def CHECK_POPT(conf): return conf.CHECK_BUNDLED_SYSTEM('popt', checkfunctions='poptGetContext', headers='popt.h') Build.BuildContext.CHECK_POPT = CHECK_POPT @conf def CHECK_CMOCKA(conf): return conf.CHECK_BUNDLED_SYSTEM_PKG('cmocka', minversion='1.1.3') Build.BuildContext.CHECK_CMOCKA = CHECK_CMOCKA @conf def CHECK_SOCKET_WRAPPER(conf): return conf.CHECK_BUNDLED_SYSTEM_PKG('socket_wrapper', minversion='1.2.3') Build.BuildContext.CHECK_SOCKET_WRAPPER = CHECK_SOCKET_WRAPPER @conf def CHECK_NSS_WRAPPER(conf): return conf.CHECK_BUNDLED_SYSTEM_PKG('nss_wrapper', minversion='1.1.6') Build.BuildContext.CHECK_NSS_WRAPPER = CHECK_NSS_WRAPPER @conf def CHECK_RESOLV_WRAPPER(conf): return conf.CHECK_BUNDLED_SYSTEM_PKG('resolv_wrapper', minversion='1.1.4') Build.BuildContext.CHECK_RESOLV_WRAPPER = CHECK_RESOLV_WRAPPER @conf def CHECK_UID_WRAPPER(conf): return conf.CHECK_BUNDLED_SYSTEM_PKG('uid_wrapper', minversion='1.2.7') Build.BuildContext.CHECK_UID_WRAPPER = CHECK_UID_WRAPPER @conf def CHECK_PAM_WRAPPER(conf): return conf.CHECK_BUNDLED_SYSTEM_PKG('pam_wrapper', minversion='1.0.7') Build.BuildContext.CHECK_PAM_WRAPPER = CHECK_PAM_WRAPPER tdb-1.4.2/buildtools/wafsamba/samba_utils.py0000660000000000000000000006062013444661620021053 0ustar rootroot00000000000000# a waf tool to add autoconf-like macros to the configure section # and for SAMBA_ macros for building libraries, binaries etc import errno import os, sys, re, fnmatch, shlex, inspect from optparse import SUPPRESS_HELP from waflib import Build, Options, Utils, Task, Logs, Configure, Errors, Context from waflib import Scripting from waflib.TaskGen import feature, before, after from waflib.Configure import ConfigurationContext from waflib.Logs import debug from waflib import ConfigSet from waflib.Build import CACHE_SUFFIX # TODO: make this a --option LIB_PATH="shared" PY3 = sys.version_info[0] == 3
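# Illustrative sketch (not part of the original wafsamba source): the
# intended behaviour of the get_string() helper defined just below, which
# normalises values that may arrive as bytes (for example subprocess
# output) when running under Python 3.
def _demo_get_string():
    assert get_string(b'GIT-1a2b3c') == 'GIT-1a2b3c'
    assert get_string('GIT-1a2b3c') == 'GIT-1a2b3c'
    # any other type raises ValueError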
if PY3: # helper function to get a string from a variable that may be 'str' or # 'bytes'; if 'bytes' then it is decoded using 'utf8'. If 'str' is passed # it is returned unchanged # Using this function in PY2/PY3 code should ensure in most cases # the PY2 code runs unchanged in PY2 whereas the code in PY3 possibly # decodes the variable (see PY2 implementation of this function below) def get_string(bytesorstring): tmp = bytesorstring if isinstance(bytesorstring, bytes): tmp = bytesorstring.decode('utf8') elif not isinstance(bytesorstring, str): raise ValueError('Expected bytes or string for %s:%s' % (type(bytesorstring), bytesorstring)) return tmp else: # Helper function to return string. # if 'str' or 'unicode' passed in they are returned unchanged # otherwise an exception is generated # Using this function in PY2/PY3 code should ensure in most cases # the PY2 code runs unchanged in PY2 whereas the code in PY3 possibly # decodes the variable (see PY3 implementation of this function above) def get_string(bytesorstring): tmp = bytesorstring if not(isinstance(bytesorstring, str) or isinstance(bytesorstring, unicode)): raise ValueError('Expected str or unicode for %s:%s' % (type(bytesorstring), bytesorstring)) return tmp # sigh, python octal constants are a mess MODE_644 = int('644', 8) MODE_744 = int('744', 8) MODE_755 = int('755', 8) MODE_777 = int('777', 8) def conf(f): # override in order to propagate the argument "mandatory" def fun(*k, **kw): mandatory = True if 'mandatory' in kw: mandatory = kw['mandatory'] del kw['mandatory'] try: return f(*k, **kw) except Errors.ConfigurationError: if mandatory: raise fun.__name__ = f.__name__ if 'mandatory' in inspect.getsource(f): fun = f setattr(Configure.ConfigurationContext, f.__name__, fun) setattr(Build.BuildContext, f.__name__, fun) return f Configure.conf = conf Configure.conftest = conf @conf def SET_TARGET_TYPE(ctx, target, value): '''set the target type of a target''' cache = LOCAL_CACHE(ctx, 'TARGET_TYPE') if target in cache and cache[target] != 'EMPTY': Logs.error("ERROR: Target '%s' in directory %s re-defined as %s - was %s" % (target, ctx.path.abspath(), value, cache[target])) sys.exit(1) LOCAL_CACHE_SET(ctx, 'TARGET_TYPE', target, value) debug("task_gen: Target '%s' created of type '%s' in %s" % (target, value, ctx.path.abspath())) return True def GET_TARGET_TYPE(ctx, target): '''get target type from cache''' cache = LOCAL_CACHE(ctx, 'TARGET_TYPE') if not target in cache: return None return cache[target] def ADD_LD_LIBRARY_PATH(path): '''add something to LD_LIBRARY_PATH''' if 'LD_LIBRARY_PATH' in os.environ: oldpath = os.environ['LD_LIBRARY_PATH'] else: oldpath = '' newpath = oldpath.split(':') if not path in newpath: newpath.append(path) os.environ['LD_LIBRARY_PATH'] = ':'.join(newpath) def needs_private_lib(bld, target): '''return True if a target links to a private library''' for lib in getattr(target, "final_libs", []): t = bld.get_tgen_by_name(lib) if t and getattr(t, 'private_library', False): return True return False def install_rpath(target): '''the rpath value for installation''' bld = target.bld bld.env['RPATH'] = [] ret = set() if bld.env.RPATH_ON_INSTALL: ret.add(bld.EXPAND_VARIABLES(bld.env.LIBDIR)) if bld.env.RPATH_ON_INSTALL_PRIVATE and needs_private_lib(bld, target): ret.add(bld.EXPAND_VARIABLES(bld.env.PRIVATELIBDIR)) return list(ret) def build_rpath(bld): '''the rpath value for build''' rpaths = [os.path.normpath('%s/%s' % (bld.env.BUILD_DIRECTORY, d)) for d in ("shared", "shared/private")] bld.env['RPATH'] = [] if bld.env.RPATH_ON_BUILD: return rpaths for rpath in rpaths: ADD_LD_LIBRARY_PATH(rpath) return [] @conf def LOCAL_CACHE(ctx, name):
'''return a named build cache dictionary, used to store state inside other functions''' if name in ctx.env: return ctx.env[name] ctx.env[name] = {} return ctx.env[name] @conf def LOCAL_CACHE_SET(ctx, cachename, key, value): '''set a value in a local cache''' cache = LOCAL_CACHE(ctx, cachename) cache[key] = value @conf def ASSERT(ctx, expression, msg): '''a build assert call''' if not expression: raise Errors.WafError("ERROR: %s\n" % msg) Build.BuildContext.ASSERT = ASSERT def SUBDIR(bld, subdir, list): '''create a list of files by pre-pending each with a subdir name''' ret = '' for l in TO_LIST(list): ret = ret + os.path.normpath(os.path.join(subdir, l)) + ' ' return ret Build.BuildContext.SUBDIR = SUBDIR def dict_concat(d1, d2): '''concatenate two dictionaries d1 += d2''' for t in d2: if t not in d1: d1[t] = d2[t] def ADD_COMMAND(opt, name, function): '''add a new top level command to waf''' Context.g_module.__dict__[name] = function opt.name = function Options.OptionsContext.ADD_COMMAND = ADD_COMMAND @feature('c', 'cc', 'cshlib', 'cprogram') @before('apply_core','exec_rule') def process_depends_on(self): '''The new depends_on attribute for build rules allow us to specify a dependency on output from a source generation rule''' if getattr(self , 'depends_on', None): lst = self.to_list(self.depends_on) for x in lst: y = self.bld.get_tgen_by_name(x) self.bld.ASSERT(y is not None, "Failed to find dependency %s of %s" % (x, self.name)) y.post() if getattr(y, 'more_includes', None): self.includes += " " + y.more_includes os_path_relpath = getattr(os.path, 'relpath', None) if os_path_relpath is None: # Python < 2.6 does not have os.path.relpath, provide a replacement # (imported from Python2.6.5~rc2) def os_path_relpath(path, start): """Return a relative version of a path""" start_list = os.path.abspath(start).split("/") path_list = os.path.abspath(path).split("/") # Work out how much of the filepath is shared by start and path. i = len(os.path.commonprefix([start_list, path_list])) rel_list = ['..'] * (len(start_list)-i) + path_list[i:] if not rel_list: return start return os.path.join(*rel_list) def unique_list(seq): '''return a uniquified list in the same order as the existing list''' seen = {} result = [] for item in seq: if item in seen: continue seen[item] = True result.append(item) return result def TO_LIST(str, delimiter=None): '''Split a list, preserving quoted strings and existing lists''' if str is None: return [] if isinstance(str, list): # we need to return a new independent list... 
return list(str) if len(str) == 0: return [] lst = str.split(delimiter) # the string may have had quotes in it, now we # check if we did have quotes, and use the slower shlex # if we need to for e in lst: if e[0] == '"': return shlex.split(str) return lst def subst_vars_error(string, env): '''substitute vars, throw an error if a variable is not defined''' lst = re.split('(\$\{\w+\})', string) out = [] for v in lst: if re.match('\$\{\w+\}', v): vname = v[2:-1] if not vname in env: raise KeyError("Failed to find variable %s in %s in env %s <%s>" % (vname, string, env.__class__, str(env))) v = env[vname] if isinstance(v, list): v = ' '.join(v) out.append(v) return ''.join(out) @conf def SUBST_ENV_VAR(ctx, varname): '''Substitute an environment variable for any embedded variables''' return subst_vars_error(ctx.env[varname], ctx.env) Build.BuildContext.SUBST_ENV_VAR = SUBST_ENV_VAR def recursive_dirlist(dir, relbase, pattern=None): '''recursive directory list''' ret = [] for f in os.listdir(dir): f2 = dir + '/' + f if os.path.isdir(f2): ret.extend(recursive_dirlist(f2, relbase)) else: if pattern and not fnmatch.fnmatch(f, pattern): continue ret.append(os_path_relpath(f2, relbase)) return ret def symlink(src, dst, force=True): """Can create symlink by force""" try: os.symlink(src, dst) except OSError as exc: if exc.errno == errno.EEXIST and force: os.remove(dst) os.symlink(src, dst) else: raise def mkdir_p(dir): '''like mkdir -p''' if not dir: return if dir.endswith("/"): mkdir_p(dir[:-1]) return if os.path.isdir(dir): return mkdir_p(os.path.dirname(dir)) os.mkdir(dir) def SUBST_VARS_RECURSIVE(string, env): '''recursively expand variables''' if string is None: return string limit=100 while (string.find('${') != -1 and limit > 0): string = subst_vars_error(string, env) limit -= 1 return string @conf def EXPAND_VARIABLES(ctx, varstr, vars=None): '''expand variables from a user supplied dictionary This is most useful when you pass vars=locals() to expand all your local variables in strings ''' if isinstance(varstr, list): ret = [] for s in varstr: ret.append(EXPAND_VARIABLES(ctx, s, vars=vars)) return ret if not isinstance(varstr, str): return varstr env = ConfigSet.ConfigSet() ret = varstr # substitute on user supplied dict if available if vars is not None: for v in vars.keys(): env[v] = vars[v] ret = SUBST_VARS_RECURSIVE(ret, env) # if anything left, subst on the environment as well if ret.find('${') != -1: ret = SUBST_VARS_RECURSIVE(ret, ctx.env)
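# Illustrative sketch (not part of the original wafsamba source): behaviour
# of the ${VAR} expansion helpers above, using a plain dict in place of a
# waf ConfigSet (both support 'in' and item access).
def _demo_expand_vars():
    env = {'PREFIX': '/usr/local', 'LIBDIR': '${PREFIX}/lib'}
    # one substitution pass only:
    assert subst_vars_error('${LIBDIR}/perl5', env) == '${PREFIX}/lib/perl5'
    # iterates until nothing is left to expand (up to the limit of 100):
    assert SUBST_VARS_RECURSIVE('${LIBDIR}/perl5', env) == '/usr/local/lib/perl5'
# make sure there is nothing left.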
Also check for the common # typo of $( instead of ${ if ret.find('${') != -1 or ret.find('$(') != -1: Logs.error('Failed to substitute all variables in varstr=%s' % ret) sys.exit(1) return ret Build.BuildContext.EXPAND_VARIABLES = EXPAND_VARIABLES def RUN_COMMAND(cmd, env=None, shell=False): '''run a external command, return exit code or signal''' if env: cmd = SUBST_VARS_RECURSIVE(cmd, env) status = os.system(cmd) if os.WIFEXITED(status): return os.WEXITSTATUS(status) if os.WIFSIGNALED(status): return - os.WTERMSIG(status) Logs.error("Unknown exit reason %d for command: %s" % (status, cmd)) return -1 def RUN_PYTHON_TESTS(testfiles, pythonpath=None, extra_env=None): env = LOAD_ENVIRONMENT() if pythonpath is None: pythonpath = os.path.join(Context.g_module.out, 'python') result = 0 for interp in env.python_interpreters: if not isinstance(interp, str): interp = ' '.join(interp) for testfile in testfiles: cmd = "PYTHONPATH=%s %s %s" % (pythonpath, interp, testfile) if extra_env: for key, value in extra_env.items(): cmd = "%s=%s %s" % (key, value, cmd) print('Running Python test with %s: %s' % (interp, testfile)) ret = RUN_COMMAND(cmd) if ret: print('Python test failed: %s' % cmd) result = ret return result # make sure we have md5. some systems don't have it try: from hashlib import md5 # Even if hashlib.md5 exists, it may be unusable. # Try to use MD5 function. In FIPS mode this will cause an exception # and we'll get to the replacement code foo = md5(b'abcd') except: try: import md5 # repeat the same check here, mere success of import is not enough. # Try to use MD5 function. In FIPS mode this will cause an exception foo = md5.md5(b'abcd') except: Context.SIG_NIL = hash('abcd') class replace_md5(object): def __init__(self): self.val = None def update(self, val): self.val = hash((self.val, val)) def digest(self): return str(self.val) def hexdigest(self): return self.digest().encode('hex') def replace_h_file(filename): f = open(filename, 'rb') m = replace_md5() while (filename): filename = f.read(100000) m.update(filename) f.close() return m.digest() Utils.md5 = replace_md5 Task.md5 = replace_md5 Utils.h_file = replace_h_file def LOAD_ENVIRONMENT(): '''load the configuration environment, allowing access to env vars from new commands''' env = ConfigSet.ConfigSet() try: p = os.path.join(Context.g_module.out, 'c4che/default'+CACHE_SUFFIX) env.load(p) except (OSError, IOError): pass return env def IS_NEWER(bld, file1, file2): '''return True if file1 is newer than file2''' curdir = bld.path.abspath() t1 = os.stat(os.path.join(curdir, file1)).st_mtime t2 = os.stat(os.path.join(curdir, file2)).st_mtime return t1 > t2 Build.BuildContext.IS_NEWER = IS_NEWER @conf def RECURSE(ctx, directory): '''recurse into a directory, relative to the curdir or top level''' try: visited_dirs = ctx.visited_dirs except AttributeError: visited_dirs = ctx.visited_dirs = set() d = os.path.join(ctx.path.abspath(), directory) if os.path.exists(d): abspath = os.path.abspath(d) else: abspath = os.path.abspath(os.path.join(Context.g_module.top, directory)) ctxclass = ctx.__class__.__name__ key = ctxclass + ':' + abspath if key in visited_dirs: # already done it return visited_dirs.add(key) relpath = os_path_relpath(abspath, ctx.path.abspath()) if ctxclass in ['tmp', 'OptionsContext', 'ConfigurationContext', 'BuildContext']: return ctx.recurse(relpath) if 'waflib.extras.compat15' in sys.modules: return ctx.recurse(relpath) Logs.error('Unknown RECURSE context class: {}'.format(ctxclass)) raise Options.OptionsContext.RECURSE = 
RECURSE Build.BuildContext.RECURSE = RECURSE def CHECK_MAKEFLAGS(options): '''check for MAKEFLAGS environment variable in case we are being called from a Makefile try to honor a few make command line flags''' if not 'WAF_MAKE' in os.environ: return makeflags = os.environ.get('MAKEFLAGS') if makeflags is None: makeflags = "" jobs_set = False jobs = None # we need to use shlex.split to cope with the escaping of spaces # in makeflags for opt in shlex.split(makeflags): # options can come either as -x or as x if opt[0:2] == 'V=': options.verbose = Logs.verbose = int(opt[2:]) if Logs.verbose > 0: Logs.zones = ['runner'] if Logs.verbose > 2: Logs.zones = ['*'] elif opt[0].isupper() and opt.find('=') != -1: # this allows us to set waf options on the make command line # for example, if you do "make FOO=blah", then we set the # option 'FOO' in Options.options, to blah. If you look in wafsamba/wscript # you will see that the command line accessible options have their dest= # set to uppercase, to allow for passing of options from make in this way # this is also how "make test TESTS=testpattern" works, and # "make VERBOSE=1" as well as things like "make SYMBOLCHECK=1" loc = opt.find('=') setattr(options, opt[0:loc], opt[loc+1:]) elif opt[0] != '-': for v in opt: if re.search(r'j[0-9]*$', v): jobs_set = True jobs = opt.strip('j') elif v == 'k': options.keep = True elif re.search(r'-j[0-9]*$', opt): jobs_set = True jobs = opt.strip('-j') elif opt == '-k': options.keep = True if not jobs_set: # default to one job options.jobs = 1 elif jobs_set and jobs: options.jobs = int(jobs) waflib_options_parse_cmd_args = Options.OptionsContext.parse_cmd_args def wafsamba_options_parse_cmd_args(self, _args=None, cwd=None, allow_unknown=False): (options, commands, envvars) = \ waflib_options_parse_cmd_args(self, _args=_args, cwd=cwd, allow_unknown=allow_unknown) CHECK_MAKEFLAGS(options) if options.jobs == 1: # # waflib.Runner.Parallel processes jobs inline if the possible number # of jobs is just 1. But (at least in waf <= 2.0.12) it still calls # create a waflib.Runner.Spawner() which creates a single # waflib.Runner.Consumer() thread that tries to process jobs from the # queue. # # This has strange effects, which are not noticed typically, # but at least on AIX python has broken threading and fails # in random ways. # # So we just add a dummy Spawner class. 
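# Illustrative sketch (not part of the original wafsamba source): how
# CHECK_MAKEFLAGS above classifies a MAKEFLAGS string.  This standalone
# rundown mirrors the loop above in simplified form; the sample flags are
# assumed.
import re, shlex

def _demo_classify_makeflags(makeflags):
    out = {}
    for opt in shlex.split(makeflags):
        if opt.startswith('V='):
            out['verbose'] = int(opt[2:])          # make verbosity level
        elif opt[0].isupper() and '=' in opt:
            name, val = opt.split('=', 1)          # NAME=value waf option
            out[name] = val
        elif re.search(r'-j[0-9]+$', opt):
            out['jobs'] = int(opt.strip('-j'))     # parallel job count
        elif opt == 'k':
            out['keep'] = True                     # keep going on errors
    return out

# _demo_classify_makeflags('V=1 TESTS=tdb -j4 k')
# -> {'verbose': 1, 'TESTS': 'tdb', 'jobs': 4, 'keep': True}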
class NoOpSpawner(object): def __init__(self, master): return from waflib import Runner Runner.Spawner = NoOpSpawner return options, commands, envvars Options.OptionsContext.parse_cmd_args = wafsamba_options_parse_cmd_args option_groups = {} def option_group(opt, name): '''find or create an option group''' global option_groups if name in option_groups: return option_groups[name] gr = opt.add_option_group(name) option_groups[name] = gr return gr Options.OptionsContext.option_group = option_group def save_file(filename, contents, create_dir=False): '''save data to a file''' if create_dir: mkdir_p(os.path.dirname(filename)) try: f = open(filename, 'w') f.write(contents) f.close() except: return False return True def load_file(filename): '''return contents of a file''' try: f = open(filename, 'r') r = f.read() f.close() except: return None return r def reconfigure(ctx): '''rerun configure if necessary''' if not os.path.exists(os.environ.get('WAFLOCK', '.lock-wscript')): raise Errors.WafError('configure has not been run') import samba_wildcard bld = samba_wildcard.fake_build_environment() Configure.autoconfig = True Scripting.check_configured(bld) def map_shlib_extension(ctx, name, python=False): '''map a filename with a shared library extension of .so to the real shlib name''' if name is None: return None if name[-1:].isdigit(): # some libraries have specified versions in the wscript rule return name (root1, ext1) = os.path.splitext(name) if python: return ctx.env.pyext_PATTERN % root1 else: (root2, ext2) = os.path.splitext(ctx.env.cshlib_PATTERN) return root1+ext2 Build.BuildContext.map_shlib_extension = map_shlib_extension def apply_pattern(filename, pattern): '''apply a filename pattern to a filename that may have a directory component''' dirname = os.path.dirname(filename) if not dirname: return pattern % filename basename = os.path.basename(filename) return os.path.join(dirname, pattern % basename) def make_libname(ctx, name, nolibprefix=False, version=None, python=False): """make a library filename Options: nolibprefix: don't include the lib prefix version : add a version number python : if we should use python module name conventions""" if python: libname = apply_pattern(name, ctx.env.pyext_PATTERN) else: libname = apply_pattern(name, ctx.env.cshlib_PATTERN) if nolibprefix and libname[0:3] == 'lib': libname = libname[3:] if version: if version[0] == '.': version = version[1:] (root, ext) = os.path.splitext(libname) if ext == ".dylib": # special case - version goes before the prefix libname = "%s.%s%s" % (root, version, ext) else: libname = "%s%s.%s" % (root, ext, version) return libname Build.BuildContext.make_libname = make_libname def get_tgt_list(bld): '''return a list of build objects for samba''' targets = LOCAL_CACHE(bld, 'TARGET_TYPE') # build a list of task generators we are interested in tgt_list = [] for tgt in targets: type = targets[tgt] if not type in ['SUBSYSTEM', 'MODULE', 'BINARY', 'LIBRARY', 'ASN1', 'PYTHON']: continue t = bld.get_tgen_by_name(tgt) if t is None: Logs.error("Target %s of type %s has no task generator" % (tgt, type)) sys.exit(1) tgt_list.append(t) return tgt_list from waflib.Context import WSCRIPT_FILE def PROCESS_SEPARATE_RULE(self, rule): ''' cause waf to process additional script based on `rule'. 
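# Illustrative sketch (not part of the original wafsamba source): what the
# library naming helpers above produce.  The pattern values are typical ELF
# settings and are assumed here, not read from a real ctx.env.
def _demo_libnames():
    assert apply_pattern('tdb', 'lib%s.so') == 'libtdb.so'
    assert apply_pattern('python/tdb', '%s.so') == 'python/tdb.so'
    # with ctx.env.cshlib_PATTERN == 'lib%s.so', make_libname(ctx, 'tdb',
    # version='1.4.2') would give 'libtdb.so.1.4.2' (and on Darwin, where
    # the extension is '.dylib', 'libtdb.1.4.2.dylib').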
You should have file named wscript__rule in the current directory where stage is either 'configure' or 'build' ''' stage = '' if isinstance(self, Configure.ConfigurationContext): stage = 'configure' elif isinstance(self, Build.BuildContext): stage = 'build' file_path = os.path.join(self.path.abspath(), WSCRIPT_FILE+'_'+stage+'_'+rule) node = self.root.find_node(file_path) if node: try: cache = self.recurse_cache except AttributeError: cache = self.recurse_cache = {} if node not in cache: cache[node] = True self.pre_recurse(node) try: function_code = node.read('rU', None) exec(compile(function_code, node.abspath(), 'exec'), self.exec_dict) finally: self.post_recurse(node) Build.BuildContext.PROCESS_SEPARATE_RULE = PROCESS_SEPARATE_RULE ConfigurationContext.PROCESS_SEPARATE_RULE = PROCESS_SEPARATE_RULE def AD_DC_BUILD_IS_ENABLED(self): if self.CONFIG_SET('AD_DC_BUILD_IS_ENABLED'): return True return False Build.BuildContext.AD_DC_BUILD_IS_ENABLED = AD_DC_BUILD_IS_ENABLED @feature('cprogram', 'cshlib', 'cstaticlib') @after('apply_lib_vars') @before('apply_obj_vars') def samba_before_apply_obj_vars(self): """before apply_obj_vars for uselib, this removes the standard paths""" def is_standard_libpath(env, path): for _path in env.STANDARD_LIBPATH: if _path == os.path.normpath(path): return True return False v = self.env for i in v['RPATH']: if is_standard_libpath(v, i): v['RPATH'].remove(i) for i in v['LIBPATH']: if is_standard_libpath(v, i): v['LIBPATH'].remove(i) def samba_add_onoff_option(opt, option, help=(), dest=None, default=True, with_name="with", without_name="without"): if default is None: default_str = "auto" elif default is True: default_str = "yes" elif default is False: default_str = "no" else: default_str = str(default) if help == (): help = ("Build with %s support (default=%s)" % (option, default_str)) if dest is None: dest = "with_%s" % option.replace('-', '_') with_val = "--%s-%s" % (with_name, option) without_val = "--%s-%s" % (without_name, option) opt.add_option(with_val, help=help, action="store_true", dest=dest, default=default) opt.add_option(without_val, help=SUPPRESS_HELP, action="store_false", dest=dest) Options.OptionsContext.samba_add_onoff_option = samba_add_onoff_option tdb-1.4.2/buildtools/wafsamba/samba_version.py0000660000000000000000000002204513444661620021377 0ustar rootroot00000000000000import os, sys from waflib import Utils, Context import samba_utils from samba_git import find_git def git_version_summary(path, env=None): git = find_git(env) if git is None: return ("GIT-UNKNOWN", {}) env.GIT = git environ = dict(os.environ) environ["GIT_DIR"] = '%s/.git' % path environ["GIT_WORK_TREE"] = path git = samba_utils.get_string(Utils.cmd_output(env.GIT + ' show --pretty=format:"%h%n%ct%n%H%n%cd" --stat HEAD', silent=True, env=environ)) lines = git.splitlines() if not lines or len(lines) < 4: return ("GIT-UNKNOWN", {}) fields = { "GIT_COMMIT_ABBREV": lines[0], "GIT_COMMIT_FULLREV": lines[2], "COMMIT_TIME": int(lines[1]), "COMMIT_DATE": lines[3], } ret = "GIT-" + fields["GIT_COMMIT_ABBREV"] if env.GIT_LOCAL_CHANGES: clean = Utils.cmd_output('%s diff HEAD | wc -l' % env.GIT, silent=True).strip() if clean == "0": fields["COMMIT_IS_CLEAN"] = 1 else: fields["COMMIT_IS_CLEAN"] = 0 ret += "+" return (ret, fields) def distversion_version_summary(path): #get version from .distversion file suffix = None fields = {} for line in Utils.readf(path + '/.distversion').splitlines(): if line == '': continue if line.startswith("#"): continue try: split_line = line.split("=") if 
split_line[1] != "": key = split_line[0] value = split_line[1] if key == "SUFFIX": suffix = value continue fields[key] = value except: print("Failed to parse line %s from .distversion file." % (line)) raise if "COMMIT_TIME" in fields: fields["COMMIT_TIME"] = int(fields["COMMIT_TIME"]) if suffix is None: return ("UNKNOWN", fields) return (suffix, fields) class SambaVersion(object): def __init__(self, version_dict, path, env=None, is_install=True): '''Determine the version number of samba See VERSION for the format. Entries on that file are also accepted as dictionary entries here ''' self.MAJOR=None self.MINOR=None self.RELEASE=None self.REVISION=None self.TP_RELEASE=None self.ALPHA_RELEASE=None self.BETA_RELEASE=None self.PRE_RELEASE=None self.RC_RELEASE=None self.IS_SNAPSHOT=True self.RELEASE_NICKNAME=None self.VENDOR_SUFFIX=None self.VENDOR_PATCH=None for a, b in version_dict.items(): if a.startswith("SAMBA_VERSION_"): setattr(self, a[14:], b) else: setattr(self, a, b) if self.IS_GIT_SNAPSHOT == "yes": self.IS_SNAPSHOT=True elif self.IS_GIT_SNAPSHOT == "no": self.IS_SNAPSHOT=False else: raise Exception("Unknown value for IS_GIT_SNAPSHOT: %s" % self.IS_GIT_SNAPSHOT) ## ## start with "3.0.22" ## self.MAJOR=int(self.MAJOR) self.MINOR=int(self.MINOR) self.RELEASE=int(self.RELEASE) SAMBA_VERSION_STRING = ("%u.%u.%u" % (self.MAJOR, self.MINOR, self.RELEASE)) ## ## maybe add "3.0.22a" or "4.0.0tp11" or "4.0.0alpha1" or "4.0.0beta1" or "3.0.22pre1" or "3.0.22rc1" ## We do not do pre or rc version on patch/letter releases ## if self.REVISION is not None: SAMBA_VERSION_STRING += self.REVISION if self.TP_RELEASE is not None: self.TP_RELEASE = int(self.TP_RELEASE) SAMBA_VERSION_STRING += "tp%u" % self.TP_RELEASE if self.ALPHA_RELEASE is not None: self.ALPHA_RELEASE = int(self.ALPHA_RELEASE) SAMBA_VERSION_STRING += ("alpha%u" % self.ALPHA_RELEASE) if self.BETA_RELEASE is not None: self.BETA_RELEASE = int(self.BETA_RELEASE) SAMBA_VERSION_STRING += ("beta%u" % self.BETA_RELEASE) if self.PRE_RELEASE is not None: self.PRE_RELEASE = int(self.PRE_RELEASE) SAMBA_VERSION_STRING += ("pre%u" % self.PRE_RELEASE) if self.RC_RELEASE is not None: self.RC_RELEASE = int(self.RC_RELEASE) SAMBA_VERSION_STRING += ("rc%u" % self.RC_RELEASE) if self.IS_SNAPSHOT: if not is_install: suffix = "DEVELOPERBUILD" self.vcs_fields = {} elif os.path.exists(os.path.join(path, ".git")): suffix, self.vcs_fields = git_version_summary(path, env=env) elif os.path.exists(os.path.join(path, ".distversion")): suffix, self.vcs_fields = distversion_version_summary(path) else: suffix = "UNKNOWN" self.vcs_fields = {} self.vcs_fields["SUFFIX"] = suffix SAMBA_VERSION_STRING += "-" + suffix else: self.vcs_fields = {} self.OFFICIAL_STRING = SAMBA_VERSION_STRING if self.VENDOR_SUFFIX is not None: SAMBA_VERSION_STRING += ("-" + self.VENDOR_SUFFIX) self.VENDOR_SUFFIX = self.VENDOR_SUFFIX if self.VENDOR_PATCH is not None: SAMBA_VERSION_STRING += ("-" + self.VENDOR_PATCH) self.VENDOR_PATCH = self.VENDOR_PATCH self.STRING = SAMBA_VERSION_STRING if self.RELEASE_NICKNAME is not None: self.STRING_WITH_NICKNAME = "%s (%s)" % (self.STRING, self.RELEASE_NICKNAME) else: self.STRING_WITH_NICKNAME = self.STRING def __str__(self): string="/* Autogenerated by waf */\n" string+="#define SAMBA_VERSION_MAJOR %u\n" % self.MAJOR string+="#define SAMBA_VERSION_MINOR %u\n" % self.MINOR string+="#define SAMBA_VERSION_RELEASE %u\n" % self.RELEASE if self.REVISION is not None: string+="#define SAMBA_VERSION_REVISION %u\n" % self.REVISION if self.TP_RELEASE is not None: 
string+="#define SAMBA_VERSION_TP_RELEASE %u\n" % self.TP_RELEASE if self.ALPHA_RELEASE is not None: string+="#define SAMBA_VERSION_ALPHA_RELEASE %u\n" % self.ALPHA_RELEASE if self.BETA_RELEASE is not None: string+="#define SAMBA_VERSION_BETA_RELEASE %u\n" % self.BETA_RELEASE if self.PRE_RELEASE is not None: string+="#define SAMBA_VERSION_PRE_RELEASE %u\n" % self.PRE_RELEASE if self.RC_RELEASE is not None: string+="#define SAMBA_VERSION_RC_RELEASE %u\n" % self.RC_RELEASE for name in sorted(self.vcs_fields.keys()): string+="#define SAMBA_VERSION_%s " % name value = self.vcs_fields[name] string_types = str if sys.version_info[0] < 3: string_types = basestring if isinstance(value, string_types): string += "\"%s\"" % value elif type(value) is int: string += "%d" % value else: raise Exception("Unknown type for %s: %r" % (name, value)) string += "\n" string+="#define SAMBA_VERSION_OFFICIAL_STRING \"" + self.OFFICIAL_STRING + "\"\n" if self.VENDOR_SUFFIX is not None: string+="#define SAMBA_VERSION_VENDOR_SUFFIX " + self.VENDOR_SUFFIX + "\n" if self.VENDOR_PATCH is not None: string+="#define SAMBA_VERSION_VENDOR_PATCH " + self.VENDOR_PATCH + "\n" if self.RELEASE_NICKNAME is not None: string+="#define SAMBA_VERSION_RELEASE_NICKNAME " + self.RELEASE_NICKNAME + "\n" # We need to put this #ifdef in to the headers so that vendors can override the version with a function string+=''' #ifdef SAMBA_VERSION_VENDOR_FUNCTION # define SAMBA_VERSION_STRING SAMBA_VERSION_VENDOR_FUNCTION #else /* SAMBA_VERSION_VENDOR_FUNCTION */ # define SAMBA_VERSION_STRING "''' + self.STRING_WITH_NICKNAME + '''" #endif ''' string+="/* Version for mkrelease.sh: \nSAMBA_VERSION_STRING=" + self.STRING_WITH_NICKNAME + "\n */\n" return string def samba_version_file(version_file, path, env=None, is_install=True): '''Parse the version information from a VERSION file''' f = open(version_file, 'r') version_dict = {} for line in f: line = line.strip() if line == '': continue if line.startswith("#"): continue try: split_line = line.split("=") if split_line[1] != "": value = split_line[1].strip('"') version_dict[split_line[0]] = value except: print("Failed to parse line %s from %s" % (line, version_file)) raise return SambaVersion(version_dict, path, env=env, is_install=is_install) def load_version(env=None, is_install=True): '''load samba versions either from ./VERSION or git return a version object for detailed breakdown''' if not env: env = samba_utils.LOAD_ENVIRONMENT() version = samba_version_file("./VERSION", ".", env, is_install=is_install) Context.g_module.VERSION = version.STRING return version tdb-1.4.2/buildtools/wafsamba/samba_waf18.py0000660000000000000000000003405713526763114020650 0ustar rootroot00000000000000# compatibility layer for building with more recent waf versions import os, shlex, sys from waflib import Build, Configure, Node, Utils, Options, Logs, TaskGen from waflib import ConfigSet from waflib.TaskGen import feature, after from waflib.Configure import conf, ConfigurationContext from waflib.Tools.flex import decide_ext # This version of flexfun runs in tsk.get_cwd() as opposed to the # bld.variant_dir: since input paths adjusted against tsk.get_cwd(), we have to # use tsk.get_cwd() for the work directory as well. 
def flexfun(tsk): env = tsk.env bld = tsk.generator.bld def to_list(xx): if isinstance(xx, str): return [xx] return xx tsk.last_cmd = lst = [] lst.extend(to_list(env.FLEX)) lst.extend(to_list(env.FLEXFLAGS)) inputs = [a.path_from(tsk.get_cwd()) for a in tsk.inputs] if env.FLEX_MSYS: inputs = [x.replace(os.sep, '/') for x in inputs] lst.extend(inputs) lst = [x for x in lst if x] txt = bld.cmd_and_log(lst, cwd=tsk.get_cwd(), env=env.env or None, quiet=0) tsk.outputs[0].write(txt.replace('\r\n', '\n').replace('\r', '\n')) # issue #1207 TaskGen.declare_chain( name = 'flex', rule = flexfun, # issue #854 ext_in = '.l', decider = decide_ext, ) for y in (Build.BuildContext, Build.CleanContext, Build.InstallContext, Build.UninstallContext, Build.ListContext): class tmp(y): variant = 'default' def abspath(self, env=None): if env and hasattr(self, 'children'): return self.get_bld().abspath() return self.old_abspath() Node.Node.old_abspath = Node.Node.abspath Node.Node.abspath = abspath def bldpath(self, env=None): return self.abspath() #return self.path_from(self.ctx.bldnode.parent) Node.Node.bldpath = bldpath def srcpath(self, env=None): return self.abspath() #return self.path_from(self.ctx.bldnode.parent) Node.Node.srcpath = srcpath def store_fast(self, filename): file = open(filename, 'wb') data = self.get_merged_dict() try: Build.cPickle.dump(data, file, -1) finally: file.close() ConfigSet.ConfigSet.store_fast = store_fast def load_fast(self, filename): file = open(filename, 'rb') try: data = Build.cPickle.load(file) finally: file.close() self.table.update(data) ConfigSet.ConfigSet.load_fast = load_fast @feature('c', 'cxx', 'd', 'asm', 'fc', 'includes') @after('propagate_uselib_vars', 'process_source') def apply_incpaths(self): lst = self.to_incnodes(self.to_list(getattr(self, 'includes', [])) + self.env['INCLUDES']) self.includes_nodes = lst cwdx = getattr(self.bld, 'cwdx', self.bld.bldnode) self.env['INCPATHS'] = [x.path_from(cwdx) for x in lst] @conf def define(self, key, val, quote=True, comment=None): assert key and isinstance(key, str) if val is None: val = () elif isinstance(val, bool): val = int(val) # waf 1.5 self.env[key] = val if isinstance(val, int) or isinstance(val, float): s = '%s=%s' else: s = quote and '%s="%s"' or '%s=%s' app = s % (key, str(val)) ban = key + '=' lst = self.env.DEFINES for x in lst: if x.startswith(ban): lst[lst.index(x)] = app break else: self.env.append_value('DEFINES', app) self.env.append_unique('define_key', key) # compat15 removes this but we want to keep it @conf def undefine(self, key, from_env=True, comment=None): assert key and isinstance(key, str) ban = key + '=' self.env.DEFINES = [x for x in self.env.DEFINES if not x.startswith(ban)] self.env.append_unique('define_key', key) # waf 1.5 if from_env: self.env[key] = () class ConfigurationContext(Configure.ConfigurationContext): def init_dirs(self): self.setenv('default') self.env.merge_config_header = True return super(ConfigurationContext, self).init_dirs() def find_program_samba(self, *k, **kw): # Override the waf default set in the @conf decorator in Configure.py if 'mandatory' not in kw: kw['mandatory'] = False ret = self.find_program_old(*k, **kw) return ret Configure.ConfigurationContext.find_program_old = Configure.ConfigurationContext.find_program Configure.ConfigurationContext.find_program = find_program_samba Build.BuildContext.ENFORCE_GROUP_ORDERING = Utils.nada Build.BuildContext.AUTOCLEANUP_STALE_FILES = Utils.nada @conf def check(self, *k, **kw): '''Override the waf defaults to inject 
--with-directory options''' # match the configuration test with specific options, for example: # --with-libiconv -> Options.options.iconv_open -> "Checking for library iconv" self.validate_c(kw) additional_dirs = [] if 'msg' in kw: msg = kw['msg'] for x in Options.OptionsContext.parser.parser.option_list: if getattr(x, 'match', None) and msg in x.match: d = getattr(Options.options, x.dest, '') if d: additional_dirs.append(d) # we add the additional dirs twice: once for the test data, and again if the compilation test succeeds below def add_options_dir(dirs, env): for x in dirs: if not x in env.CPPPATH: env.CPPPATH = [os.path.join(x, 'include')] + env.CPPPATH if not x in env.LIBPATH: env.LIBPATH = [os.path.join(x, 'lib')] + env.LIBPATH add_options_dir(additional_dirs, kw['env']) self.start_msg(kw['msg'], **kw) ret = None try: ret = self.run_build(*k, **kw) except self.errors.ConfigurationError: self.end_msg(kw['errmsg'], 'YELLOW', **kw) if Logs.verbose > 1: raise else: self.fatal('The configuration failed') else: kw['success'] = ret # success! time for brandy add_options_dir(additional_dirs, self.env) ret = self.post_check(*k, **kw) if not ret: self.end_msg(kw['errmsg'], 'YELLOW', **kw) self.fatal('The configuration failed %r' % ret) else: self.end_msg(self.ret_msg(kw['okmsg'], kw), **kw) return ret @conf def CHECK_LIBRARY_SUPPORT(conf, rpath=False, version_script=False, msg=None): '''see if the platform supports building libraries''' if msg is None: if rpath: msg = "rpath library support" else: msg = "building library support" def build(bld): lib_node = bld.srcnode.make_node('libdir/liblc1.c') lib_node.parent.mkdir() lib_node.write('int lib_func(void) { return 42; }\n', 'w') main_node = bld.srcnode.make_node('main.c') main_node.write('int main(void) {return !(lib_func() == 42);}', 'w') linkflags = [] if version_script: script = bld.srcnode.make_node('ldscript') script.write('TEST_1.0A2 { global: *; };\n', 'w') linkflags.append('-Wl,--version-script=%s' % script.abspath()) bld(features='c cshlib', source=lib_node, target='lib1', linkflags=linkflags, name='lib1') o = bld(features='c cprogram', source=main_node, target='prog1', uselib_local='lib1') if rpath: o.rpath = [lib_node.parent.abspath()] def run_app(self): args = conf.SAMBA_CROSS_ARGS(msg=msg) env = dict(os.environ) env['LD_LIBRARY_PATH'] = self.inputs[0].parent.abspath() + os.pathsep + env.get('LD_LIBRARY_PATH', '') self.generator.bld.cmd_and_log([self.inputs[0].abspath()] + args, env=env) o.post() bld(rule=run_app, source=o.link_task.outputs[0]) # ok, so it builds try: conf.check(build_fun=build, msg='Checking for %s' % msg) except conf.errors.ConfigurationError: return False return True @conf def CHECK_NEED_LC(conf, msg): '''check if we need -lc''' def build(bld): lib_node = bld.srcnode.make_node('libdir/liblc1.c') lib_node.parent.mkdir() lib_node.write('#include <stdio.h>\nint lib_func(void) { FILE *f = fopen("foo", "r");}\n', 'w') bld(features='c cshlib', source=[lib_node], linkflags=conf.env.EXTRA_LDFLAGS, target='liblc') try: conf.check(build_fun=build, msg=msg, okmsg='-lc is unnecessary', errmsg='-lc is necessary') except conf.errors.ConfigurationError: return False return True # already implemented on "waf -v" def order(bld, tgt_list): return True Build.BuildContext.check_group_ordering = order @conf def CHECK_CFG(self, *k, **kw): if 'args' in kw: kw['args'] = shlex.split(kw['args']) if not 'mandatory' in kw: kw['mandatory'] = False kw['global_define'] = True return self.check_cfg(*k, **kw) def cmd_output(cmd, **kw): silent = False if
'silent' in kw: silent = kw['silent'] del(kw['silent']) if 'e' in kw: tmp = kw['e'] del(kw['e']) kw['env'] = tmp kw['shell'] = isinstance(cmd, str) kw['stdout'] = Utils.subprocess.PIPE if silent: kw['stderr'] = Utils.subprocess.PIPE try: p = Utils.subprocess.Popen(cmd, **kw) output = p.communicate()[0] except OSError as e: raise ValueError(str(e)) if p.returncode: if not silent: msg = "command execution failed: %s -> %r" % (cmd, str(output)) raise ValueError(msg) output = '' return output Utils.cmd_output = cmd_output @TaskGen.feature('c', 'cxx', 'd') @TaskGen.before('apply_incpaths', 'propagate_uselib_vars') @TaskGen.after('apply_link', 'process_source') def apply_uselib_local(self): """ process the uselib_local attribute execute after apply_link because of the execution order set on 'link_task' """ env = self.env from waflib.Tools.ccroot import stlink_task # 1. the case of the libs defined in the project (visit ancestors first) # the ancestors external libraries (uselib) will be prepended self.uselib = self.to_list(getattr(self, 'uselib', [])) self.includes = self.to_list(getattr(self, 'includes', [])) names = self.to_list(getattr(self, 'uselib_local', [])) get = self.bld.get_tgen_by_name seen = set() seen_uselib = set() tmp = Utils.deque(names) # consume a copy of the list of names if tmp: if Logs.verbose: Logs.warn('compat: "uselib_local" is deprecated, replace by "use"') while tmp: lib_name = tmp.popleft() # visit dependencies only once if lib_name in seen: continue y = get(lib_name) y.post() seen.add(lib_name) # object has ancestors to process (shared libraries): add them to the end of the list if getattr(y, 'uselib_local', None): for x in self.to_list(getattr(y, 'uselib_local', [])): obj = get(x) obj.post() if getattr(obj, 'link_task', None): if not isinstance(obj.link_task, stlink_task): tmp.append(x) # link task and flags if getattr(y, 'link_task', None): link_name = y.target[y.target.rfind(os.sep) + 1:] if isinstance(y.link_task, stlink_task): env.append_value('STLIB', [link_name]) else: # some linkers can link against programs env.append_value('LIB', [link_name]) # the order self.link_task.set_run_after(y.link_task) # for the recompilation self.link_task.dep_nodes += y.link_task.outputs # add the link path too tmp_path = y.link_task.outputs[0].parent.bldpath() if not tmp_path in env['LIBPATH']: env.prepend_value('LIBPATH', [tmp_path]) # add ancestors uselib too - but only propagate those that have no staticlib defined for v in self.to_list(getattr(y, 'uselib', [])): if v not in seen_uselib: seen_uselib.add(v) if not env['STLIB_' + v]: if not v in self.uselib: self.uselib.insert(0, v) # if the library task generator provides 'export_includes', add to the include path # the export_includes must be a list of paths relative to the other library if getattr(y, 'export_includes', None): self.includes.extend(y.to_incnodes(y.export_includes)) @TaskGen.feature('cprogram', 'cxxprogram', 'cstlib', 'cxxstlib', 'cshlib', 'cxxshlib', 'dprogram', 'dstlib', 'dshlib') @TaskGen.after('apply_link') def apply_objdeps(self): "add the .o files produced by some other object files in the same manner as uselib_local" names = getattr(self, 'add_objects', []) if not names: return names = self.to_list(names) get = self.bld.get_tgen_by_name seen = [] while names: x = names[0] # visit dependencies only once if x in seen: names = names[1:] continue # object does not exist ? y = get(x) # object has ancestors to process first ? 
update the list of names if getattr(y, 'add_objects', None): added = 0 lst = y.to_list(y.add_objects) lst.reverse() for u in lst: if u in seen: continue added = 1 names = [u]+names if added: continue # list of names modified, loop # safe to process the current object y.post() seen.append(x) for t in getattr(y, 'compiled_tasks', []): self.link_task.inputs.extend(t.outputs) @TaskGen.after('apply_link') def process_obj_files(self): if not hasattr(self, 'obj_files'): return for x in self.obj_files: node = self.path.find_resource(x) self.link_task.inputs.append(node) @TaskGen.taskgen_method def add_obj_file(self, file): """Small example on how to link object files as if they were source obj = bld.create_obj('cc') obj.add_obj_file('foo.o')""" if not hasattr(self, 'obj_files'): self.obj_files = [] if not 'process_obj_files' in self.meths: self.meths.append('process_obj_files') self.obj_files.append(file) tdb-1.4.2/buildtools/wafsamba/samba_wildcard.py0000660000000000000000000001063413444661620021504 0ustar rootroot00000000000000# based on playground/evil in the waf svn tree import os, datetime, fnmatch from waflib import Scripting, Utils, Options, Logs, Errors from waflib import ConfigSet, Context from samba_utils import LOCAL_CACHE, os_path_relpath def run_task(t, k): '''run a single build task''' ret = t.run() if ret: raise Errors.WafError("Failed to build %s: %u" % (k, ret)) def run_named_build_task(cmd): '''run a named build task, matching the cmd name using fnmatch wildcards against inputs and outputs of all build tasks''' bld = fake_build_environment(info=False) found = False cwd_node = bld.root.find_dir(os.getcwd()) top_node = bld.root.find_dir(bld.srcnode.abspath()) cmd = os.path.normpath(cmd) # cope with builds of bin/*/* if os.path.islink(cmd): cmd = os_path_relpath(os.readlink(cmd), os.getcwd()) if cmd[0:12] == "bin/default/": cmd = cmd[12:] for g in bld.task_manager.groups: for attr in ['outputs', 'inputs']: for t in g.tasks: s = getattr(t, attr, []) for k in s: relpath1 = k.relpath_gen(cwd_node) relpath2 = k.relpath_gen(top_node) if (fnmatch.fnmatch(relpath1, cmd) or fnmatch.fnmatch(relpath2, cmd)): t.position = [0,0] print(t.display()) run_task(t, k) found = True if not found: raise Errors.WafError("Unable to find build target matching %s" % cmd) def rewrite_compile_targets(): '''cope with the bin/ form of compile target''' if not Options.options.compile_targets: return bld = fake_build_environment(info=False) targets = LOCAL_CACHE(bld, 'TARGET_TYPE') tlist = [] for t in Options.options.compile_targets.split(','): if not os.path.islink(t): tlist.append(t) continue link = os.readlink(t) list = link.split('/') for name in [list[-1], '/'.join(list[-2:])]: if name in targets: tlist.append(name) continue Options.options.compile_targets = ",".join(tlist) def wildcard_main(missing_cmd_fn): '''this replaces main from Scripting, allowing us to override the behaviour for unknown commands If a unknown command is found, then missing_cmd_fn() is called with the name of the requested command ''' Scripting.commands = Options.arg_line[:] # rewrite the compile targets to cope with the bin/xx form rewrite_compile_targets() while Scripting.commands: x = Scripting.commands.pop(0) ini = datetime.datetime.now() if x == 'configure': fun = Scripting.configure elif x == 'build': fun = Scripting.build else: fun = getattr(Utils.g_module, x, None) # this is the new addition on top of main from Scripting.py if not fun: missing_cmd_fn(x) break ctx = getattr(Utils.g_module, x + '_context', Utils.Context)() if x 
in ['init', 'shutdown', 'dist', 'distclean', 'distcheck']: try: fun(ctx) except TypeError: fun() else: fun(ctx) ela = '' if not Options.options.progress_bar: ela = ' (%s)' % Utils.get_elapsed_time(ini) if x != 'init' and x != 'shutdown': Logs.info('%r finished successfully%s' % (x, ela)) if not Scripting.commands and x != 'shutdown': Scripting.commands.append('shutdown') def fake_build_environment(info=True, flush=False): """create all the tasks for the project, but do not run the build return the build context in use""" bld = getattr(Context.g_module, 'build_context', Utils.Context)() bld = Scripting.check_configured(bld) Options.commands['install'] = False Options.commands['uninstall'] = False bld.is_install = 0 # False try: proj = ConfigSet.ConfigSet(Options.lockfile) except IOError: raise Errors.WafError("Project not configured (run 'waf configure' first)") bld.load_envs() if info: Logs.info("Waf: Entering directory `%s'" % bld.bldnode.abspath()) bld.add_subdirs([os.path.split(Context.g_module.root_path)[0]]) bld.pre_build() if flush: bld.flush() return bld tdb-1.4.2/buildtools/wafsamba/stale_files.py0000660000000000000000000000771713444661620021052 0ustar rootroot00000000000000# encoding: utf-8 # Thomas Nagy, 2006-2010 (ita) """ Add a pre-build hook to remove all build files which do not have a corresponding target This can be used for example to remove the targets that have changed name without performing a full 'waf clean' Of course, it will only work if there are no dynamically generated nodes/tasks, in which case the method will have to be modified to exclude some folders for example. """ from waflib import Logs, Build, Options, Utils, Errors import os from wafsamba import samba_utils from Runner import Parallel old_refill_task_list = Parallel.refill_task_list def replace_refill_task_list(self): '''replacement for refill_task_list() that deletes stale files''' iit = old_refill_task_list(self) bld = self.bld if not getattr(bld, 'new_rules', False): # we only need to check for stale files if the build rules changed return iit if Options.options.compile_targets: # not safe when --target is used return iit # execute only once if getattr(self, 'cleanup_done', False): return iit self.cleanup_done = True def group_name(g): tm = self.bld.task_manager return [x for x in tm.groups_names if id(tm.groups_names[x]) == id(g)][0] bin_base = bld.bldnode.abspath() bin_base_len = len(bin_base) # paranoia if bin_base[-4:] != '/bin': raise Errors.WafError("Invalid bin base: %s" % bin_base) # obtain the expected list of files expected = [] for i in range(len(bld.task_manager.groups)): g = bld.task_manager.groups[i] tasks = g.tasks_gen for x in tasks: try: if getattr(x, 'target'): tlist = samba_utils.TO_LIST(getattr(x, 'target')) ttype = getattr(x, 'samba_type', None) task_list = getattr(x, 'compiled_tasks', []) if task_list: # this gets all of the .o files, including the task # ids, so foo.c maps to foo_3.o for idx=3 for tsk in task_list: for output in tsk.outputs: objpath = os.path.normpath(output.abspath(bld.env)) expected.append(objpath) for t in tlist: if ttype in ['LIBRARY','MODULE']: t = samba_utils.apply_pattern(t, bld.env.shlib_PATTERN) if ttype == 'PYTHON': t = samba_utils.apply_pattern(t, bld.env.pyext_PATTERN) p = os.path.join(x.path.abspath(bld.env), t) p = os.path.normpath(p) expected.append(p) for n in x.allnodes: p = n.abspath(bld.env) if p[0:bin_base_len] == bin_base: expected.append(p) except: pass for root, dirs, files in os.walk(bin_base): for f in files: p = root + '/' + f if 
os.path.islink(p): link = os.readlink(p) if link[0:bin_base_len] == bin_base: p = link if f in ['config.h']: continue (froot, fext) = os.path.splitext(f) if fext not in [ '.c', '.h', '.so', '.o' ]: continue if f[-7:] == '.inst.h': continue if p.find("/.conf") != -1: continue if not p in expected and os.path.exists(p): Logs.warn("Removing stale file: %s" % p) os.unlink(p) return iit def AUTOCLEANUP_STALE_FILES(bld): """automatically clean up any files in bin that shouldn't be there""" old_refill_task_list = Parallel.refill_task_list Parallel.refill_task_list = replace_refill_task_list Parallel.bld = bld Build.BuildContext.AUTOCLEANUP_STALE_FILES = AUTOCLEANUP_STALE_FILES tdb-1.4.2/buildtools/wafsamba/symbols.py0000660000000000000000000005336713444661620020252 0ustar rootroot00000000000000# a waf tool to extract symbols from object files or libraries # using nm, producing a set of exposed defined/undefined symbols import os, re, subprocess from waflib import Utils, Build, Options, Logs, Errors from waflib.Logs import debug from samba_utils import TO_LIST, LOCAL_CACHE, get_tgt_list, os_path_relpath # these are the data structures used in symbols.py: # # bld.env.symbol_map : dictionary mapping public symbol names to list of # subsystem names where that symbol exists # # t.in_library : list of libraries that t is in # # bld.env.public_symbols: set of public symbols for each subsystem # bld.env.used_symbols : set of used symbols for each subsystem # # bld.env.syslib_symbols: dictionary mapping system library name to set of symbols # for that library # bld.env.library_dict : dictionary mapping built library paths to subsystem names # # LOCAL_CACHE(bld, 'TARGET_TYPE') : dictionary mapping subsystem name to target type def symbols_extract(bld, objfiles, dynamic=False): '''extract symbols from objfile, returning a dictionary containing the set of undefined and public symbols for each file''' ret = {} # see if we can get some results from the nm cache if not bld.env.nm_cache: bld.env.nm_cache = {} objfiles = set(objfiles).copy() remaining = set() for obj in objfiles: if obj in bld.env.nm_cache: ret[obj] = bld.env.nm_cache[obj].copy() else: remaining.add(obj) objfiles = remaining if len(objfiles) == 0: return ret cmd = ["nm"] if dynamic: # needed for some .so files cmd.append("-D") cmd.extend(list(objfiles)) nmpipe = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout if len(objfiles) == 1: filename = list(objfiles)[0] ret[filename] = { "PUBLIC": set(), "UNDEFINED" : set()} for line in nmpipe: line = line.strip() if line.endswith(b':'): filename = line[:-1] ret[filename] = { "PUBLIC": set(), "UNDEFINED" : set() } continue cols = line.split(b" ") if cols == [b'']: continue # see if the line starts with an address if len(cols) == 3: symbol_type = cols[1] symbol = cols[2] else: symbol_type = cols[0] symbol = cols[1] if symbol_type in b"BDGTRVWSi": # its a public symbol ret[filename]["PUBLIC"].add(symbol) elif symbol_type in b"U": ret[filename]["UNDEFINED"].add(symbol) # add to the cache for obj in objfiles: if obj in ret: bld.env.nm_cache[obj] = ret[obj].copy() else: bld.env.nm_cache[obj] = { "PUBLIC": set(), "UNDEFINED" : set() } return ret def real_name(name): if name.find(".objlist") != -1: name = name[:-8] return name def find_ldd_path(bld, libname, binary): '''find the path to the syslib we will link against''' ret = None if not bld.env.syslib_paths: bld.env.syslib_paths = {} if libname in bld.env.syslib_paths: return bld.env.syslib_paths[libname] lddpipe = subprocess.Popen(['ldd', binary], 
stdout=subprocess.PIPE).stdout for line in lddpipe: line = line.strip() cols = line.split(b" ") if len(cols) < 3 or cols[1] != b"=>": continue if cols[0].startswith(b"libc."): # save this one too bld.env.libc_path = cols[2] if cols[0].startswith(libname): ret = cols[2] bld.env.syslib_paths[libname] = ret return ret # some regular expressions for parsing readelf output re_sharedlib = re.compile(b'Shared library: \[(.*)\]') # output from readelf could be `Library rpath` or `Library runpath` re_rpath = re.compile(b'Library (rpath|runpath): \[(.*)\]') def get_libs(bld, binname): '''find the list of linked libraries for any binary or library binname is the path to the binary/library on disk We do this using readelf instead of ldd as we need to avoid recursing into system libraries ''' # see if we can get the result from the ldd cache if not bld.env.lib_cache: bld.env.lib_cache = {} if binname in bld.env.lib_cache: return bld.env.lib_cache[binname].copy() rpath = [] libs = set() elfpipe = subprocess.Popen(['readelf', '--dynamic', binname], stdout=subprocess.PIPE).stdout for line in elfpipe: m = re_sharedlib.search(line) if m: libs.add(m.group(1)) m = re_rpath.search(line) if m: # output from Popen is always bytestr even in py3 rpath.extend(m.group(2).split(b":")) ret = set() for lib in libs: found = False for r in rpath: path = os.path.join(r, lib) if os.path.exists(path): ret.add(os.path.realpath(path)) found = True break if not found: # we didn't find this lib using rpath. It is probably a system # library, so to find the path to it we either need to use ldd # or we need to start parsing /etc/ld.so.conf* ourselves. We'll # use ldd for now, even though it is slow path = find_ldd_path(bld, lib, binname) if path: ret.add(os.path.realpath(path)) bld.env.lib_cache[binname] = ret.copy() return ret def get_libs_recursive(bld, binname, seen): '''find the recursive list of linked libraries for any binary or library binname is the path to the binary/library on disk. seen is a set used to prevent loops ''' if binname in seen: return set() ret = get_libs(bld, binname) seen.add(binname) for lib in ret: # we don't want to recurse into system libraries. If a system # library that we use (eg.
libcups) happens to use another library # (such as libkrb5) which contains common symbols with our own # libraries, then that is not an error if lib in bld.env.library_dict: ret = ret.union(get_libs_recursive(bld, lib, seen)) return ret def find_syslib_path(bld, libname, deps): '''find the path to the syslib we will link against''' # the strategy is to use the targets that depend on the library, and run ldd # on it to find the real location of the library that is used linkpath = deps[0].link_task.outputs[0].abspath(bld.env) if libname == "python": libname += bld.env.PYTHON_VERSION return find_ldd_path(bld, "lib%s" % libname.lower(), linkpath) def build_symbol_sets(bld, tgt_list): '''build the public_symbols and undefined_symbols attributes for each target''' if bld.env.public_symbols: return objlist = [] # list of object file objmap = {} # map from object filename to target (subsystem) name for t in tgt_list: t.public_symbols = set() t.undefined_symbols = set() t.used_symbols = set() for tsk in getattr(t, 'compiled_tasks', []): for output in tsk.outputs: objpath = output.abspath(bld.env) objlist.append(objpath) objmap[objpath] = t symbols = symbols_extract(bld, objlist) for obj in objlist: t = objmap[obj] t.public_symbols = t.public_symbols.union(symbols[obj]["PUBLIC"]) t.undefined_symbols = t.undefined_symbols.union(symbols[obj]["UNDEFINED"]) t.used_symbols = t.used_symbols.union(symbols[obj]["UNDEFINED"]) t.undefined_symbols = t.undefined_symbols.difference(t.public_symbols) # and the reverse map of public symbols to subsystem name bld.env.symbol_map = {} for t in tgt_list: for s in t.public_symbols: if not s in bld.env.symbol_map: bld.env.symbol_map[s] = [] bld.env.symbol_map[s].append(real_name(t.sname)) targets = LOCAL_CACHE(bld, 'TARGET_TYPE') bld.env.public_symbols = {} for t in tgt_list: name = real_name(t.sname) if name in bld.env.public_symbols: bld.env.public_symbols[name] = bld.env.public_symbols[name].union(t.public_symbols) else: bld.env.public_symbols[name] = t.public_symbols if t.samba_type == 'LIBRARY': for dep in t.add_objects: t2 = bld.get_tgen_by_name(dep) bld.ASSERT(t2 is not None, "Library '%s' has unknown dependency '%s'" % (name, dep)) bld.env.public_symbols[name] = bld.env.public_symbols[name].union(t2.public_symbols) bld.env.used_symbols = {} for t in tgt_list: name = real_name(t.sname) if name in bld.env.used_symbols: bld.env.used_symbols[name] = bld.env.used_symbols[name].union(t.used_symbols) else: bld.env.used_symbols[name] = t.used_symbols if t.samba_type == 'LIBRARY': for dep in t.add_objects: t2 = bld.get_tgen_by_name(dep) bld.ASSERT(t2 is not None, "Library '%s' has unknown dependency '%s'" % (name, dep)) bld.env.used_symbols[name] = bld.env.used_symbols[name].union(t2.used_symbols) def build_library_dict(bld, tgt_list): '''build the library_dict dictionary''' if bld.env.library_dict: return bld.env.library_dict = {} for t in tgt_list: if t.samba_type in [ 'LIBRARY', 'PYTHON' ]: linkpath = os.path.realpath(t.link_task.outputs[0].abspath(bld.env)) bld.env.library_dict[linkpath] = t.sname def build_syslib_sets(bld, tgt_list): '''build the public_symbols for all syslibs''' if bld.env.syslib_symbols: return # work out what syslibs we depend on, and what targets those are used in syslibs = {} objmap = {} for t in tgt_list: if getattr(t, 'uselib', []) and t.samba_type in [ 'LIBRARY', 'BINARY', 'PYTHON' ]: for lib in t.uselib: if lib in ['PYEMBED', 'PYEXT']: lib = "python" if not lib in syslibs: syslibs[lib] = [] syslibs[lib].append(t) # work out the paths to 
each syslib syslib_paths = [] for lib in syslibs: path = find_syslib_path(bld, lib, syslibs[lib]) if path is None: Logs.warn("Unable to find syslib path for %s" % lib) if path is not None: syslib_paths.append(path) objmap[path] = lib.lower() # add in libc syslib_paths.append(bld.env.libc_path) objmap[bld.env.libc_path] = 'c' symbols = symbols_extract(bld, syslib_paths, dynamic=True) # keep a map of syslib names to public symbols bld.env.syslib_symbols = {} for lib in symbols: bld.env.syslib_symbols[lib] = symbols[lib]["PUBLIC"] # add to the map of symbols to dependencies for lib in symbols: for sym in symbols[lib]["PUBLIC"]: if not sym in bld.env.symbol_map: bld.env.symbol_map[sym] = [] bld.env.symbol_map[sym].append(objmap[lib]) # keep the libc symbols as well, as these are useful for some of the # sanity checks bld.env.libc_symbols = symbols[bld.env.libc_path]["PUBLIC"] # add to the combined map of dependency name to public_symbols for lib in bld.env.syslib_symbols: bld.env.public_symbols[objmap[lib]] = bld.env.syslib_symbols[lib] def build_autodeps(bld, t): '''build the set of dependencies for a target''' deps = set() name = real_name(t.sname) targets = LOCAL_CACHE(bld, 'TARGET_TYPE') for sym in t.undefined_symbols: if sym in t.public_symbols: continue if sym in bld.env.symbol_map: depname = bld.env.symbol_map[sym] if depname == [ name ]: # self dependencies aren't interesting continue if t.in_library == depname: # no need to depend on the library we are part of continue if depname[0] in ['c', 'python']: # these don't go into autodeps continue if targets[depname[0]] in [ 'SYSLIB' ]: deps.add(depname[0]) continue t2 = bld.get_tgen_by_name(depname[0]) if len(t2.in_library) != 1: deps.add(depname[0]) continue if t2.in_library == t.in_library: # if we're part of the same library, we don't need to autodep continue deps.add(t2.in_library[0]) t.autodeps = deps def build_library_names(bld, tgt_list): '''add a in_library attribute to all targets that are part of a library''' if bld.env.done_build_library_names: return for t in tgt_list: t.in_library = [] for t in tgt_list: if t.samba_type in [ 'LIBRARY' ]: for obj in t.samba_deps_extended: t2 = bld.get_tgen_by_name(obj) if t2 and t2.samba_type in [ 'SUBSYSTEM', 'ASN1' ]: if not t.sname in t2.in_library: t2.in_library.append(t.sname) bld.env.done_build_library_names = True def check_library_deps(bld, t): '''check that all the autodeps that have mutual dependency of this target are in the same library as the target''' name = real_name(t.sname) if len(t.in_library) > 1: Logs.warn("WARNING: Target '%s' in multiple libraries: %s" % (t.sname, t.in_library)) for dep in t.autodeps: t2 = bld.get_tgen_by_name(dep) if t2 is None: continue for dep2 in t2.autodeps: if dep2 == name and t.in_library != t2.in_library: Logs.warn("WARNING: mutual dependency %s <=> %s" % (name, real_name(t2.sname))) Logs.warn("Libraries should match. %s != %s" % (t.in_library, t2.in_library)) # raise Errors.WafError("illegal mutual dependency") def check_syslib_collisions(bld, tgt_list): '''check if a target has any symbol collisions with a syslib We do not want any code in Samba to use a symbol name from a system library. The chance of that causing problems is just too high. 
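    For example (target, symbol and library names here are purely
    illustrative), a target that exported a symbol also present in a
    system library would be reported by the loop below roughly as

       ERROR: Target 'mytarget' has symbols '{b'md5_init'}' which is also in syslib '/usr/lib/libcrypto.so'

    and the whole build is then failed with a WafError.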
    Note that libreplace uses a rep_XX approach of renaming symbols via macros
    '''
    has_error = False
    for t in tgt_list:
        for lib in bld.env.syslib_symbols:
            common = t.public_symbols.intersection(bld.env.syslib_symbols[lib])
            if common:
                Logs.error("ERROR: Target '%s' has symbols '%s' which is also in syslib '%s'" % (t.sname, common, lib))
                has_error = True
    if has_error:
        raise Errors.WafError("symbols in common with system libraries")

def check_dependencies(bld, t):
    '''check for dependencies that should be changed'''

    if bld.get_tgen_by_name(t.sname + ".objlist"):
        return

    targets = LOCAL_CACHE(bld, 'TARGET_TYPE')

    remaining = t.undefined_symbols.copy()
    remaining = remaining.difference(t.public_symbols)

    sname = real_name(t.sname)

    deps = set(t.samba_deps)
    for d in t.samba_deps:
        if targets[d] in [ 'EMPTY', 'DISABLED', 'SYSLIB', 'GENERATOR' ]:
            continue
        bld.ASSERT(d in bld.env.public_symbols, "Failed to find symbol list for dependency '%s'" % d)
        diff = remaining.intersection(bld.env.public_symbols[d])
        if not diff and targets[sname] != 'LIBRARY':
            Logs.info("Target '%s' has no dependency on %s" % (sname, d))
        else:
            remaining = remaining.difference(diff)

    t.unsatisfied_symbols = set()
    needed = {}
    for sym in remaining:
        if sym in bld.env.symbol_map:
            dep = bld.env.symbol_map[sym]
            if not dep[0] in needed:
                needed[dep[0]] = set()
            needed[dep[0]].add(sym)
        else:
            t.unsatisfied_symbols.add(sym)

    for dep in needed:
        Logs.info("Target '%s' should add dep '%s' for symbols %s" % (sname, dep, " ".join(needed[dep])))

def check_syslib_dependencies(bld, t):
    '''check for syslib dependencies'''

    if bld.get_tgen_by_name(t.sname + ".objlist"):
        return

    sname = real_name(t.sname)

    remaining = set()

    features = TO_LIST(t.features)
    if 'pyembed' in features or 'pyext' in features:
        if 'python' in bld.env.public_symbols:
            t.unsatisfied_symbols = t.unsatisfied_symbols.difference(bld.env.public_symbols['python'])

    needed = {}
    for sym in t.unsatisfied_symbols:
        if sym in bld.env.symbol_map:
            dep = bld.env.symbol_map[sym][0]
            if dep == 'c':
                continue
            if not dep in needed:
                needed[dep] = set()
            needed[dep].add(sym)
        else:
            remaining.add(sym)

    for dep in needed:
        Logs.info("Target '%s' should add syslib dep '%s' for symbols %s" % (sname, dep, " ".join(needed[dep])))

    if remaining:
        debug("deps: Target '%s' has unsatisfied symbols: %s" % (sname, " ".join(remaining)))

def symbols_symbolcheck(task):
    '''check the internal dependency lists'''
    bld = task.env.bld
    tgt_list = get_tgt_list(bld)

    build_symbol_sets(bld, tgt_list)
    build_library_names(bld, tgt_list)

    for t in tgt_list:
        t.autodeps = set()
        if getattr(t, 'source', ''):
            build_autodeps(bld, t)

    for t in tgt_list:
        check_dependencies(bld, t)

    for t in tgt_list:
        check_library_deps(bld, t)

def symbols_syslibcheck(task):
    '''check the syslib dependencies'''
    bld = task.env.bld
    tgt_list = get_tgt_list(bld)

    build_syslib_sets(bld, tgt_list)
    check_syslib_collisions(bld, tgt_list)

    for t in tgt_list:
        check_syslib_dependencies(bld, t)

def symbols_whyneeded(task):
    """check why 'target' needs to link to 'subsystem'"""
    bld = task.env.bld
    tgt_list = get_tgt_list(bld)

    why = Options.options.WHYNEEDED.split(":")
    if len(why) != 2:
        raise Errors.WafError("usage: WHYNEEDED=TARGET:DEPENDENCY")
    target = why[0]
    subsystem = why[1]

    build_symbol_sets(bld, tgt_list)
    build_library_names(bld, tgt_list)
    build_syslib_sets(bld, tgt_list)

    Logs.info("Checking why %s needs to link to %s" % (target, subsystem))
    if not target in bld.env.used_symbols:
        Logs.warn("unable to find target '%s' in used_symbols dict" % target)
        return
    if not subsystem in
bld.env.public_symbols: Logs.warn("unable to find subsystem '%s' in public_symbols dict" % subsystem) return overlap = bld.env.used_symbols[target].intersection(bld.env.public_symbols[subsystem]) if not overlap: Logs.info("target '%s' doesn't use any public symbols from '%s'" % (target, subsystem)) else: Logs.info("target '%s' uses symbols %s from '%s'" % (target, overlap, subsystem)) def report_duplicate(bld, binname, sym, libs, fail_on_error): '''report duplicated symbols''' if sym in ['_init', '_fini', '_edata', '_end', '__bss_start']: return libnames = [] for lib in libs: if lib in bld.env.library_dict: libnames.append(bld.env.library_dict[lib]) else: libnames.append(lib) if fail_on_error: raise Errors.WafError("%s: Symbol %s linked in multiple libraries %s" % (binname, sym, libnames)) else: print("%s: Symbol %s linked in multiple libraries %s" % (binname, sym, libnames)) def symbols_dupcheck_binary(bld, binname, fail_on_error): '''check for duplicated symbols in one binary''' libs = get_libs_recursive(bld, binname, set()) symlist = symbols_extract(bld, libs, dynamic=True) symmap = {} for libpath in symlist: for sym in symlist[libpath]['PUBLIC']: if sym == '_GLOBAL_OFFSET_TABLE_': continue if not sym in symmap: symmap[sym] = set() symmap[sym].add(libpath) for sym in symmap: if len(symmap[sym]) > 1: for libpath in symmap[sym]: if libpath in bld.env.library_dict: report_duplicate(bld, binname, sym, symmap[sym], fail_on_error) break def symbols_dupcheck(task, fail_on_error=False): '''check for symbols defined in two different subsystems''' bld = task.env.bld tgt_list = get_tgt_list(bld) targets = LOCAL_CACHE(bld, 'TARGET_TYPE') build_library_dict(bld, tgt_list) for t in tgt_list: if t.samba_type == 'BINARY': binname = os_path_relpath(t.link_task.outputs[0].abspath(bld.env), os.getcwd()) symbols_dupcheck_binary(bld, binname, fail_on_error) def symbols_dupcheck_fatal(task): '''check for symbols defined in two different subsystems (and fail if duplicates are found)''' symbols_dupcheck(task, fail_on_error=True) def SYMBOL_CHECK(bld): '''check our dependency lists''' if Options.options.SYMBOLCHECK: bld.SET_BUILD_GROUP('symbolcheck') task = bld(rule=symbols_symbolcheck, always=True, name='symbol checking') task.env.bld = bld bld.SET_BUILD_GROUP('syslibcheck') task = bld(rule=symbols_syslibcheck, always=True, name='syslib checking') task.env.bld = bld bld.SET_BUILD_GROUP('syslibcheck') task = bld(rule=symbols_dupcheck, always=True, name='symbol duplicate checking') task.env.bld = bld if Options.options.WHYNEEDED: bld.SET_BUILD_GROUP('syslibcheck') task = bld(rule=symbols_whyneeded, always=True, name='check why a dependency is needed') task.env.bld = bld Build.BuildContext.SYMBOL_CHECK = SYMBOL_CHECK def DUP_SYMBOL_CHECK(bld): if Options.options.DUP_SYMBOLCHECK and bld.env.DEVELOPER: '''check for duplicate symbols''' bld.SET_BUILD_GROUP('syslibcheck') task = bld(rule=symbols_dupcheck_fatal, always=True, name='symbol duplicate checking') task.env.bld = bld Build.BuildContext.DUP_SYMBOL_CHECK = DUP_SYMBOL_CHECK tdb-1.4.2/buildtools/wafsamba/test_duplicate_symbol.sh0000770000000000000000000000044413444661620023130 0ustar rootroot00000000000000#!/bin/sh # Run the waf duplicate symbol check, wrapped in subunit. . 
testprogs/blackbox/subunit.sh

subunit_start_test duplicate_symbols

if $PYTHON ./buildtools/bin/waf build --dup-symbol-check; then
	subunit_pass_test duplicate_symbols
else
	echo | subunit_fail_test duplicate_symbols
fi
tdb-1.4.2/buildtools/wafsamba/tests/__init__.py0000660000000000000000000000224012406075657021451 0ustar rootroot00000000000000# Copyright (C) 2012 Jelmer Vernooij

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.

# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

"""Tests for wafsamba."""

from unittest import (
    TestCase,
    TestLoader,
    )


def test_suite():
    names = [
        'abi',
        'bundled',
        'utils',
        ]
    module_names = ['wafsamba.tests.test_' + name for name in names]
    loader = TestLoader()
    result = loader.suiteClass()
    suite = loader.loadTestsFromNames(module_names)
    result.addTests(suite)
    return result
tdb-1.4.2/buildtools/wafsamba/tests/test_abi.py0000660000000000000000000001042013444661620021495 0ustar rootroot00000000000000# Copyright (C) 2012 Jelmer Vernooij

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA from wafsamba.tests import TestCase from wafsamba.samba_abi import ( abi_write_vscript, normalise_signature, ) from samba.compat import StringIO class NormaliseSignatureTests(TestCase): def test_function_simple(self): self.assertEquals("int (const struct GUID *, const struct GUID *)", normalise_signature("$2 = {int (const struct GUID *, const struct GUID *)} 0xe871 ")) def test_maps_Bool(self): # Some types have different internal names self.assertEquals("bool (const struct GUID *)", normalise_signature("$1 = {_Bool (const struct GUID *)} 0xe75b ")) def test_function_keep(self): self.assertEquals( "enum ndr_err_code (struct ndr_push *, int, const union winreg_Data *)", normalise_signature("enum ndr_err_code (struct ndr_push *, int, const union winreg_Data *)")) def test_struct_constant(self): self.assertEquals( 'uuid = {time_low = 0, time_mid = 0, time_hi_and_version = 0, clock_seq = "\\000", node = "\\000\\000\\000\\000\\000"}, if_version = 0', normalise_signature('$239 = {uuid = {time_low = 0, time_mid = 0, time_hi_and_version = 0, clock_seq = "\\000", node = "\\000\\000\\000\\000\\000"}, if_version = 0}')) def test_incomplete_sequence(self): # Newer versions of gdb insert these incomplete sequence elements self.assertEquals( 'uuid = {time_low = 2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq = "\\237\\350", node = "\\b\\000+\\020H`"}, if_version = 2', normalise_signature('$244 = {uuid = {time_low = 2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq = "\\237", , node = "\\b\\000+\\020H`"}, if_version = 2}')) self.assertEquals( 'uuid = {time_low = 2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq = "\\237\\350", node = "\\b\\000+\\020H`"}, if_version = 2', normalise_signature('$244 = {uuid = {time_low = 2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq = "\\237\\350", node = "\\b\\000+\\020H`"}, if_version = 2}')) class WriteVscriptTests(TestCase): def test_one(self): f = StringIO() abi_write_vscript(f, "MYLIB", "1.0", [], { "old": "1.0", "new": "1.0"}, ["*"]) self.assertEquals(f.getvalue(), """\ 1.0 { \tglobal: \t\t*; \tlocal: \t\t_end; \t\t__bss_start; \t\t_edata; }; """) def test_simple(self): # No restrictions. 
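        # For reference, the call below is expected to produce one node for
        # the prior version followed by the current node (this mirrors the
        # assertion that follows):
        #
        #   MYLIB_0.1 { global: old; };
        #   1.0 { global: *; local: _end; __bss_start; _edata; };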
f = StringIO() abi_write_vscript(f, "MYLIB", "1.0", ["0.1"], { "old": "0.1", "new": "1.0"}, ["*"]) self.assertEquals(f.getvalue(), """\ MYLIB_0.1 { \tglobal: \t\told; }; 1.0 { \tglobal: \t\t*; \tlocal: \t\t_end; \t\t__bss_start; \t\t_edata; }; """) def test_exclude(self): f = StringIO() abi_write_vscript(f, "MYLIB", "1.0", [], { "exc_old": "0.1", "old": "0.1", "new": "1.0"}, ["!exc_*"]) self.assertEquals(f.getvalue(), """\ 1.0 { \tglobal: \t\t*; \tlocal: \t\texc_*; \t\t_end; \t\t__bss_start; \t\t_edata; }; """) def test_excludes_and_includes(self): f = StringIO() abi_write_vscript(f, "MYLIB", "1.0", [], { "pub_foo": "1.0", "exc_bar": "1.0", "other": "1.0" }, ["pub_*", "!exc_*"]) self.assertEquals(f.getvalue(), """\ 1.0 { \tglobal: \t\tpub_*; \tlocal: \t\texc_*; \t\t_end; \t\t__bss_start; \t\t_edata; \t\t*; }; """) tdb-1.4.2/buildtools/wafsamba/tests/test_bundled.py0000660000000000000000000000176412406075657022400 0ustar rootroot00000000000000# Copyright (C) 2012 Jelmer Vernooij # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 2.1 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA from wafsamba.tests import TestCase from wafsamba.samba_bundled import ( tuplize_version, ) class TuplizeVersionTests(TestCase): def test_simple(self): self.assertEquals((1, 2, 10), tuplize_version("1.2.10")) tdb-1.4.2/buildtools/wafsamba/tests/test_utils.py0000660000000000000000000000473412406075657022123 0ustar rootroot00000000000000# Copyright (C) 2012 Jelmer Vernooij # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 2.1 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. 
# You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA from wafsamba.tests import TestCase from wafsamba.samba_utils import ( TO_LIST, dict_concat, subst_vars_error, unique_list, ) class ToListTests(TestCase): def test_none(self): self.assertEquals([], TO_LIST(None)) def test_already_list(self): self.assertEquals(["foo", "bar", 1], TO_LIST(["foo", "bar", 1])) def test_default_delimiter(self): self.assertEquals(["foo", "bar"], TO_LIST("foo bar")) self.assertEquals(["foo", "bar"], TO_LIST(" foo bar ")) self.assertEquals(["foo ", "bar"], TO_LIST(" \"foo \" bar ")) def test_delimiter(self): self.assertEquals(["foo", "bar"], TO_LIST("foo,bar", ",")) self.assertEquals([" foo", "bar "], TO_LIST(" foo,bar ", ",")) self.assertEquals([" \" foo\"", " bar "], TO_LIST(" \" foo\", bar ", ",")) class UniqueListTests(TestCase): def test_unique_list(self): self.assertEquals(["foo", "bar"], unique_list(["foo", "bar", "foo"])) class SubstVarsErrorTests(TestCase): def test_valid(self): self.assertEquals("", subst_vars_error("", {})) self.assertEquals("FOO bar", subst_vars_error("${F} bar", {"F": "FOO"})) def test_invalid(self): self.assertRaises(KeyError, subst_vars_error, "${F}", {}) class DictConcatTests(TestCase): def test_empty(self): ret = {} dict_concat(ret, {}) self.assertEquals({}, ret) def test_same(self): ret = {"foo": "bar"} dict_concat(ret, {"foo": "bla"}) self.assertEquals({"foo": "bar"}, ret) def test_simple(self): ret = {"foo": "bar"} dict_concat(ret, {"blie": "bla"}) self.assertEquals({"foo": "bar", "blie": "bla"}, ret) tdb-1.4.2/buildtools/wafsamba/wafsamba.py0000660000000000000000000010331413527011454020323 0ustar rootroot00000000000000# a waf tool to add autoconf-like macros to the configure section # and for SAMBA_ macros for building libraries, binaries etc import os, sys, re, shutil, fnmatch from waflib import Build, Options, Task, Utils, TaskGen, Logs, Context, Errors from waflib.Configure import conf from waflib.Logs import debug from samba_utils import SUBST_VARS_RECURSIVE TaskGen.task_gen.apply_verif = Utils.nada # bring in the other samba modules from samba_utils import * from samba_utils import symlink from samba_version import * from samba_autoconf import * from samba_patterns import * from samba_pidl import * from samba_autoproto import * from samba_python import * from samba_perl import * from samba_deps import * from samba_bundled import * from samba_third_party import * import samba_cross import samba_install import samba_conftests import samba_abi import samba_headers import generic_cc import samba_dist import samba_wildcard import symbols import pkgconfig import configure_file import samba_waf18 LIB_PATH="shared" os.environ['PYTHONUNBUFFERED'] = '1' if Context.HEXVERSION not in (0x2001100,): Logs.error(''' Please use the version of waf that comes with Samba, not a system installed version. See http://wiki.samba.org/index.php/Waf for details. Alternatively, please run ./configure and make as usual. 
That will call the right version of waf.''') sys.exit(1) @conf def SAMBA_BUILD_ENV(conf): '''create the samba build environment''' conf.env.BUILD_DIRECTORY = conf.bldnode.abspath() mkdir_p(os.path.join(conf.env.BUILD_DIRECTORY, LIB_PATH)) mkdir_p(os.path.join(conf.env.BUILD_DIRECTORY, LIB_PATH, "private")) mkdir_p(os.path.join(conf.env.BUILD_DIRECTORY, "modules")) mkdir_p(os.path.join(conf.env.BUILD_DIRECTORY, 'python/samba/dcerpc')) # this allows all of the bin/shared and bin/python targets # to be expressed in terms of build directory paths mkdir_p(os.path.join(conf.env.BUILD_DIRECTORY, 'default')) for (source, target) in [('shared', 'shared'), ('modules', 'modules'), ('python', 'python')]: link_target = os.path.join(conf.env.BUILD_DIRECTORY, 'default/' + target) if not os.path.lexists(link_target): symlink('../' + source, link_target) # get perl to put the blib files in the build directory blib_bld = os.path.join(conf.env.BUILD_DIRECTORY, 'default/pidl/blib') blib_src = os.path.join(conf.srcnode.abspath(), 'pidl/blib') mkdir_p(blib_bld + '/man1') mkdir_p(blib_bld + '/man3') if os.path.islink(blib_src): os.unlink(blib_src) elif os.path.exists(blib_src): shutil.rmtree(blib_src) def ADD_INIT_FUNCTION(bld, subsystem, target, init_function): '''add an init_function to the list for a subsystem''' if init_function is None: return bld.ASSERT(subsystem is not None, "You must specify a subsystem for init_function '%s'" % init_function) cache = LOCAL_CACHE(bld, 'INIT_FUNCTIONS') if not subsystem in cache: cache[subsystem] = [] cache[subsystem].append( { 'TARGET':target, 'INIT_FUNCTION':init_function } ) Build.BuildContext.ADD_INIT_FUNCTION = ADD_INIT_FUNCTION def generate_empty_file(task): task.outputs[0].write('') return 0 ################################################################# def SAMBA_LIBRARY(bld, libname, source, deps='', public_deps='', includes='', public_headers=None, public_headers_install=True, private_headers=None, header_path=None, pc_files=None, vnum=None, soname=None, cflags='', cflags_end=None, ldflags='', external_library=False, realname=None, keep_underscore=False, autoproto=None, autoproto_extra_source='', group='main', depends_on='', local_include=True, global_include=True, vars=None, subdir=None, install_path=None, install=True, pyembed=False, pyext=False, target_type='LIBRARY', bundled_extension=False, bundled_name=None, link_name=None, abi_directory=None, abi_match=None, hide_symbols=False, manpages=None, private_library=False, grouping_library=False, allow_undefined_symbols=False, allow_warnings=False, enabled=True): '''define a Samba library''' if private_library and public_headers: raise Errors.WafError("private library '%s' must not have public header files" % libname) if LIB_MUST_BE_PRIVATE(bld, libname): private_library = True if not enabled: SET_TARGET_TYPE(bld, libname, 'DISABLED') return source = bld.EXPAND_VARIABLES(source, vars=vars) if subdir: source = bld.SUBDIR(subdir, source) # remember empty libraries, so we can strip the dependencies if ((source == '') or (source == [])): if deps == '' and public_deps == '': SET_TARGET_TYPE(bld, libname, 'EMPTY') return empty_c = libname + '.empty.c' bld.SAMBA_GENERATOR('%s_empty_c' % libname, rule=generate_empty_file, target=empty_c) source=empty_c if BUILTIN_LIBRARY(bld, libname): obj_target = libname else: obj_target = libname + '.objlist' if group == 'libraries': subsystem_group = 'main' else: subsystem_group = group # first create a target for building the object files for this library # by separating in this 
way, we avoid recompiling the C files # separately for the install library and the build library bld.SAMBA_SUBSYSTEM(obj_target, source = source, deps = deps, public_deps = public_deps, includes = includes, public_headers = public_headers, public_headers_install = public_headers_install, private_headers= private_headers, header_path = header_path, cflags = cflags, cflags_end = cflags_end, group = subsystem_group, autoproto = autoproto, autoproto_extra_source=autoproto_extra_source, depends_on = depends_on, hide_symbols = hide_symbols, allow_warnings = allow_warnings, pyembed = pyembed, pyext = pyext, local_include = local_include, global_include = global_include) if BUILTIN_LIBRARY(bld, libname): return if not SET_TARGET_TYPE(bld, libname, target_type): return # the library itself will depend on that object target deps += ' ' + public_deps deps = TO_LIST(deps) deps.append(obj_target) realname = bld.map_shlib_extension(realname, python=(target_type=='PYTHON')) link_name = bld.map_shlib_extension(link_name, python=(target_type=='PYTHON')) # we don't want any public libraries without version numbers if (not private_library and target_type != 'PYTHON' and not realname): if vnum is None and soname is None: raise Errors.WafError("public library '%s' must have a vnum" % libname) if pc_files is None: raise Errors.WafError("public library '%s' must have pkg-config file" % libname) if public_headers is None: raise Errors.WafError("public library '%s' must have header files" % libname) if bundled_name is not None: pass elif target_type == 'PYTHON' or realname or not private_library: if keep_underscore: bundled_name = libname else: bundled_name = libname.replace('_', '-') else: assert (private_library == True and realname is None) if abi_directory or vnum or soname: bundled_extension=True bundled_name = PRIVATE_NAME(bld, libname.replace('_', '-'), bundled_extension, private_library) ldflags = TO_LIST(ldflags) if bld.env['ENABLE_RELRO'] is True: ldflags.extend(TO_LIST('-Wl,-z,relro,-z,now')) features = 'c cshlib symlink_lib install_lib' if pyext: features += ' pyext' if pyembed: features += ' pyembed' if abi_directory: features += ' abi_check' if pyembed and bld.env['PYTHON_SO_ABI_FLAG']: # For ABI checking, we don't care about the Python version. # Remove the Python ABI tag (e.g. 
".cpython-35m") abi_flag = bld.env['PYTHON_SO_ABI_FLAG'] replacement = '' version_libname = libname.replace(abi_flag, replacement) else: version_libname = libname vscript = None if bld.env.HAVE_LD_VERSION_SCRIPT: if private_library: version = "%s_%s" % (Context.g_module.APPNAME, Context.g_module.VERSION) elif vnum: version = "%s_%s" % (libname, vnum) else: version = None if version: vscript = "%s.vscript" % libname bld.ABI_VSCRIPT(version_libname, abi_directory, version, vscript, abi_match) fullname = apply_pattern(bundled_name, bld.env.cshlib_PATTERN) fullpath = bld.path.find_or_declare(fullname) vscriptpath = bld.path.find_or_declare(vscript) if not fullpath: raise Errors.WafError("unable to find fullpath for %s" % fullname) if not vscriptpath: raise Errors.WafError("unable to find vscript path for %s" % vscript) bld.add_manual_dependency(fullpath, vscriptpath) if bld.is_install: # also make the .inst file depend on the vscript instname = apply_pattern(bundled_name + '.inst', bld.env.cshlib_PATTERN) bld.add_manual_dependency(bld.path.find_or_declare(instname), bld.path.find_or_declare(vscript)) vscript = os.path.join(bld.path.abspath(bld.env), vscript) bld.SET_BUILD_GROUP(group) t = bld( features = features, source = [], target = bundled_name, depends_on = depends_on, samba_ldflags = ldflags, samba_deps = deps, samba_includes = includes, version_script = vscript, version_libname = version_libname, local_include = local_include, global_include = global_include, vnum = vnum, soname = soname, install_path = None, samba_inst_path = install_path, name = libname, samba_realname = realname, samba_install = install, abi_directory = "%s/%s" % (bld.path.abspath(), abi_directory), abi_match = abi_match, private_library = private_library, grouping_library=grouping_library, allow_undefined_symbols=allow_undefined_symbols ) if realname and not link_name: link_name = 'shared/%s' % realname if link_name: if 'waflib.extras.compat15' in sys.modules: link_name = 'default/' + link_name t.link_name = link_name if pc_files is not None and not private_library: if pyembed: bld.PKG_CONFIG_FILES(pc_files, vnum=vnum, extra_name=bld.env['PYTHON_SO_ABI_FLAG']) else: bld.PKG_CONFIG_FILES(pc_files, vnum=vnum) if (manpages is not None and 'XSLTPROC_MANPAGES' in bld.env and bld.env['XSLTPROC_MANPAGES']): bld.MANPAGES(manpages, install) Build.BuildContext.SAMBA_LIBRARY = SAMBA_LIBRARY ################################################################# def SAMBA_BINARY(bld, binname, source, deps='', includes='', public_headers=None, private_headers=None, header_path=None, modules=None, ldflags=None, cflags='', cflags_end=None, autoproto=None, use_hostcc=False, use_global_deps=True, compiler=None, group='main', manpages=None, local_include=True, global_include=True, subsystem_name=None, pyembed=False, vars=None, subdir=None, install=True, install_path=None, enabled=True): '''define a Samba binary''' if not enabled: SET_TARGET_TYPE(bld, binname, 'DISABLED') return if not SET_TARGET_TYPE(bld, binname, 'BINARY'): return features = 'c cprogram symlink_bin install_bin' if pyembed: features += ' pyembed' obj_target = binname + '.objlist' source = bld.EXPAND_VARIABLES(source, vars=vars) if subdir: source = bld.SUBDIR(subdir, source) source = unique_list(TO_LIST(source)) if group == 'binaries': subsystem_group = 'main' else: subsystem_group = group # only specify PIE flags for binaries pie_cflags = cflags pie_ldflags = TO_LIST(ldflags) if bld.env['ENABLE_PIE'] is True: pie_cflags += ' -fPIE' pie_ldflags.extend(TO_LIST('-pie')) if 
bld.env['ENABLE_RELRO'] is True: pie_ldflags.extend(TO_LIST('-Wl,-z,relro,-z,now')) # first create a target for building the object files for this binary # by separating in this way, we avoid recompiling the C files # separately for the install binary and the build binary bld.SAMBA_SUBSYSTEM(obj_target, source = source, deps = deps, includes = includes, cflags = pie_cflags, cflags_end = cflags_end, group = subsystem_group, autoproto = autoproto, subsystem_name = subsystem_name, local_include = local_include, global_include = global_include, use_hostcc = use_hostcc, pyext = pyembed, use_global_deps= use_global_deps) bld.SET_BUILD_GROUP(group) # the binary itself will depend on that object target deps = TO_LIST(deps) deps.append(obj_target) t = bld( features = features, source = [], target = binname, samba_deps = deps, samba_includes = includes, local_include = local_include, global_include = global_include, samba_modules = modules, top = True, samba_subsystem= subsystem_name, install_path = None, samba_inst_path= install_path, samba_install = install, samba_ldflags = pie_ldflags ) if manpages is not None and 'XSLTPROC_MANPAGES' in bld.env and bld.env['XSLTPROC_MANPAGES']: bld.MANPAGES(manpages, install) Build.BuildContext.SAMBA_BINARY = SAMBA_BINARY ################################################################# def SAMBA_MODULE(bld, modname, source, deps='', includes='', subsystem=None, init_function=None, module_init_name='samba_init_module', autoproto=None, autoproto_extra_source='', cflags='', cflags_end=None, internal_module=True, local_include=True, global_include=True, vars=None, subdir=None, enabled=True, pyembed=False, manpages=None, allow_undefined_symbols=False, allow_warnings=False, install=True ): '''define a Samba module.''' bld.ASSERT(subsystem, "You must specify a subsystem for SAMBA_MODULE(%s)" % modname) source = bld.EXPAND_VARIABLES(source, vars=vars) if subdir: source = bld.SUBDIR(subdir, source) if internal_module or BUILTIN_LIBRARY(bld, modname): # Do not create modules for disabled subsystems if GET_TARGET_TYPE(bld, subsystem) == 'DISABLED': return bld.SAMBA_SUBSYSTEM(modname, source, deps=deps, includes=includes, autoproto=autoproto, autoproto_extra_source=autoproto_extra_source, cflags=cflags, cflags_end=cflags_end, local_include=local_include, global_include=global_include, allow_warnings=allow_warnings, enabled=enabled) bld.ADD_INIT_FUNCTION(subsystem, modname, init_function) return if not enabled: SET_TARGET_TYPE(bld, modname, 'DISABLED') return # Do not create modules for disabled subsystems if GET_TARGET_TYPE(bld, subsystem) == 'DISABLED': return realname = modname deps += ' ' + subsystem while realname.startswith("lib"+subsystem+"_"): realname = realname[len("lib"+subsystem+"_"):] while realname.startswith(subsystem+"_"): realname = realname[len(subsystem+"_"):] build_name = "%s_module_%s" % (subsystem, realname) realname = bld.make_libname(realname) while realname.startswith("lib"): realname = realname[len("lib"):] build_link_name = "modules/%s/%s" % (subsystem, realname) if init_function: cflags += " -D%s=%s" % (init_function, module_init_name) bld.SAMBA_LIBRARY(modname, source, deps=deps, includes=includes, cflags=cflags, cflags_end=cflags_end, realname = realname, autoproto = autoproto, local_include=local_include, global_include=global_include, vars=vars, bundled_name=build_name, link_name=build_link_name, install_path="${MODULESDIR}/%s" % subsystem, pyembed=pyembed, manpages=manpages, allow_undefined_symbols=allow_undefined_symbols, 
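                      # Illustrative name-mapping note (module and subsystem names here are
                      # hypothetical): for SAMBA_MODULE('ldb_ldap', ..., subsystem='ldb') the
                      # loops above strip the 'ldb_' prefix, giving build_name
                      # 'ldb_module_ldap'; make_libname() then typically yields 'libldap.so',
                      # the leading 'lib' is stripped again, and the module is linked as
                      # 'modules/ldb/ldap.so' and installed under ${MODULESDIR}/ldb.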
allow_warnings=allow_warnings, install=install ) Build.BuildContext.SAMBA_MODULE = SAMBA_MODULE ################################################################# def SAMBA_SUBSYSTEM(bld, modname, source, deps='', public_deps='', includes='', public_headers=None, public_headers_install=True, private_headers=None, header_path=None, cflags='', cflags_end=None, group='main', init_function_sentinel=None, autoproto=None, autoproto_extra_source='', depends_on='', local_include=True, local_include_first=True, global_include=True, subsystem_name=None, enabled=True, use_hostcc=False, use_global_deps=True, vars=None, subdir=None, hide_symbols=False, allow_warnings=False, pyext=False, pyembed=False): '''define a Samba subsystem''' if not enabled: SET_TARGET_TYPE(bld, modname, 'DISABLED') return # remember empty subsystems, so we can strip the dependencies if ((source == '') or (source == [])): if deps == '' and public_deps == '': SET_TARGET_TYPE(bld, modname, 'EMPTY') return empty_c = modname + '.empty.c' bld.SAMBA_GENERATOR('%s_empty_c' % modname, rule=generate_empty_file, target=empty_c) source=empty_c if not SET_TARGET_TYPE(bld, modname, 'SUBSYSTEM'): return source = bld.EXPAND_VARIABLES(source, vars=vars) if subdir: source = bld.SUBDIR(subdir, source) source = unique_list(TO_LIST(source)) deps += ' ' + public_deps bld.SET_BUILD_GROUP(group) features = 'c' if pyext: features += ' pyext' if pyembed: features += ' pyembed' t = bld( features = features, source = source, target = modname, samba_cflags = CURRENT_CFLAGS(bld, modname, cflags, allow_warnings=allow_warnings, hide_symbols=hide_symbols), depends_on = depends_on, samba_deps = TO_LIST(deps), samba_includes = includes, local_include = local_include, local_include_first = local_include_first, global_include = global_include, samba_subsystem= subsystem_name, samba_use_hostcc = use_hostcc, samba_use_global_deps = use_global_deps, ) if cflags_end is not None: t.samba_cflags.extend(TO_LIST(cflags_end)) if autoproto is not None: bld.SAMBA_AUTOPROTO(autoproto, source + TO_LIST(autoproto_extra_source)) if public_headers is not None: bld.PUBLIC_HEADERS(public_headers, header_path=header_path, public_headers_install=public_headers_install) return t Build.BuildContext.SAMBA_SUBSYSTEM = SAMBA_SUBSYSTEM def SAMBA_GENERATOR(bld, name, rule, source='', target='', group='generators', enabled=True, public_headers=None, public_headers_install=True, private_headers=None, header_path=None, vars=None, dep_vars=[], always=False): '''A generic source generator target''' if not SET_TARGET_TYPE(bld, name, 'GENERATOR'): return if not enabled: return dep_vars.append('ruledeps') dep_vars.append('SAMBA_GENERATOR_VARS') bld.SET_BUILD_GROUP(group) t = bld( rule=rule, source=bld.EXPAND_VARIABLES(source, vars=vars), target=target, shell=isinstance(rule, str), update_outputs=True, before='c', ext_out='.c', samba_type='GENERATOR', dep_vars = dep_vars, name=name) if vars is None: vars = {} t.env.SAMBA_GENERATOR_VARS = vars if always: t.always = True if public_headers is not None: bld.PUBLIC_HEADERS(public_headers, header_path=header_path, public_headers_install=public_headers_install) return t Build.BuildContext.SAMBA_GENERATOR = SAMBA_GENERATOR @Utils.run_once def SETUP_BUILD_GROUPS(bld): '''setup build groups used to ensure that the different build phases happen consecutively''' bld.p_ln = bld.srcnode # we do want to see all targets! 
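    # The build groups registered below run in the order they are added, so a
    # later phase only starts once the earlier ones have finished.  A build
    # rule picks its phase with, for example (illustrative):
    #
    #   bld.SET_BUILD_GROUP('generators')
    #   ... declare generated-source targets ...
    #   bld.SET_BUILD_GROUP('main')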
bld.env['USING_BUILD_GROUPS'] = True bld.add_group('setup') bld.add_group('build_compiler_source') bld.add_group('vscripts') bld.add_group('base_libraries') bld.add_group('generators') bld.add_group('compiler_prototypes') bld.add_group('compiler_libraries') bld.add_group('build_compilers') bld.add_group('build_source') bld.add_group('prototypes') bld.add_group('headers') bld.add_group('main') bld.add_group('symbolcheck') bld.add_group('syslibcheck') bld.add_group('final') Build.BuildContext.SETUP_BUILD_GROUPS = SETUP_BUILD_GROUPS def SET_BUILD_GROUP(bld, group): '''set the current build group''' if not 'USING_BUILD_GROUPS' in bld.env: return bld.set_group(group) Build.BuildContext.SET_BUILD_GROUP = SET_BUILD_GROUP def SAMBA_SCRIPT(bld, name, pattern, installdir, installname=None): '''used to copy scripts from the source tree into the build directory for use by selftest''' source = bld.path.ant_glob(pattern, flat=True) bld.SET_BUILD_GROUP('build_source') for s in TO_LIST(source): iname = s if installname is not None: iname = installname target = os.path.join(installdir, iname) tgtdir = os.path.dirname(os.path.join(bld.srcnode.abspath(bld.env), '..', target)) mkdir_p(tgtdir) link_src = os.path.normpath(os.path.join(bld.path.abspath(), s)) link_dst = os.path.join(tgtdir, os.path.basename(iname)) if os.path.islink(link_dst) and os.readlink(link_dst) == link_src: continue if os.path.islink(link_dst): os.unlink(link_dst) Logs.info("symlink: %s -> %s/%s" % (s, installdir, iname)) symlink(link_src, link_dst) Build.BuildContext.SAMBA_SCRIPT = SAMBA_SCRIPT def copy_and_fix_python_path(task): pattern='sys.path.insert(0, "bin/python")' if task.env["PYTHONARCHDIR"] in sys.path and task.env["PYTHONDIR"] in sys.path: replacement = "" elif task.env["PYTHONARCHDIR"] == task.env["PYTHONDIR"]: replacement="""sys.path.insert(0, "%s")""" % task.env["PYTHONDIR"] else: replacement="""sys.path.insert(0, "%s") sys.path.insert(1, "%s")""" % (task.env["PYTHONARCHDIR"], task.env["PYTHONDIR"]) if task.env["PYTHON"][0].startswith("/"): replacement_shebang = "#!%s\n" % task.env["PYTHON"][0] else: replacement_shebang = "#!/usr/bin/env %s\n" % task.env["PYTHON"][0] installed_location=task.outputs[0].bldpath(task.env) source_file = open(task.inputs[0].srcpath(task.env)) installed_file = open(installed_location, 'w') lineno = 0 for line in source_file: newline = line if (lineno == 0 and line[:2] == "#!"): newline = replacement_shebang elif pattern in line: newline = line.replace(pattern, replacement) installed_file.write(newline) lineno = lineno + 1 installed_file.close() os.chmod(installed_location, 0o755) return 0 def copy_and_fix_perl_path(task): pattern='use lib "$RealBin/lib";' replacement = "" if not task.env["PERL_LIB_INSTALL_DIR"] in task.env["PERL_INC"]: replacement = 'use lib "%s";' % task.env["PERL_LIB_INSTALL_DIR"] if task.env["PERL"][0] == "/": replacement_shebang = "#!%s\n" % task.env["PERL"] else: replacement_shebang = "#!/usr/bin/env %s\n" % task.env["PERL"] installed_location=task.outputs[0].bldpath(task.env) source_file = open(task.inputs[0].srcpath(task.env)) installed_file = open(installed_location, 'w') lineno = 0 for line in source_file: newline = line if lineno == 0 and task.env["PERL_SPECIFIED"] == True and line[:2] == "#!": newline = replacement_shebang elif pattern in line: newline = line.replace(pattern, replacement) installed_file.write(newline) lineno = lineno + 1 installed_file.close() os.chmod(installed_location, 0o755) return 0 def install_file(bld, destdir, file, chmod=MODE_644, flat=False, 
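# Hedged usage sketch for the installer below (the script name is made up,
# and MODE_755 is assumed to come from samba_utils in the full tree):
#   install_file(bld, '${BINDIR}', 'scripts/mytool.py', chmod=MODE_755,
#                flat=True, python_fixup=True)
# This routes the file through the '.inst' generator defined in the body, so
# the shebang and the sys.path bootstrap line are rewritten before the flat
# basename 'mytool.py' is installed into BINDIR.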
python_fixup=False, perl_fixup=False, destname=None, base_name=None): '''install a file''' if not isinstance(file, str): file = file.abspath() destdir = bld.EXPAND_VARIABLES(destdir) if not destname: destname = file if flat: destname = os.path.basename(destname) dest = os.path.join(destdir, destname) if python_fixup: # fix the path python will use to find Samba modules inst_file = file + '.inst' bld.SAMBA_GENERATOR('python_%s' % destname, rule=copy_and_fix_python_path, dep_vars=["PYTHON","PYTHON_SPECIFIED","PYTHONDIR","PYTHONARCHDIR"], source=file, target=inst_file) file = inst_file if perl_fixup: # fix the path perl will use to find Samba modules inst_file = file + '.inst' bld.SAMBA_GENERATOR('perl_%s' % destname, rule=copy_and_fix_perl_path, dep_vars=["PERL","PERL_SPECIFIED","PERL_LIB_INSTALL_DIR"], source=file, target=inst_file) file = inst_file if base_name: file = os.path.join(base_name, file) bld.install_as(dest, file, chmod=chmod) def INSTALL_FILES(bld, destdir, files, chmod=MODE_644, flat=False, python_fixup=False, perl_fixup=False, destname=None, base_name=None): '''install a set of files''' for f in TO_LIST(files): install_file(bld, destdir, f, chmod=chmod, flat=flat, python_fixup=python_fixup, perl_fixup=perl_fixup, destname=destname, base_name=base_name) Build.BuildContext.INSTALL_FILES = INSTALL_FILES def INSTALL_WILDCARD(bld, destdir, pattern, chmod=MODE_644, flat=False, python_fixup=False, exclude=None, trim_path=None): '''install a set of files matching a wildcard pattern''' files=TO_LIST(bld.path.ant_glob(pattern, flat=True)) if trim_path: files2 = [] for f in files: files2.append(os_path_relpath(f, trim_path)) files = files2 if exclude: for f in files[:]: if fnmatch.fnmatch(f, exclude): files.remove(f) INSTALL_FILES(bld, destdir, files, chmod=chmod, flat=flat, python_fixup=python_fixup, base_name=trim_path) Build.BuildContext.INSTALL_WILDCARD = INSTALL_WILDCARD def INSTALL_DIR(bld, path, chmod=0o755, env=None): """Install a directory if it doesn't exist, always set permissions.""" if not path: return [] destpath = bld.EXPAND_VARIABLES(path) if Options.options.destdir: destpath = os.path.join(Options.options.destdir, destpath.lstrip(os.sep)) if bld.is_install > 0: if not os.path.isdir(destpath): try: Logs.info('* create %s', destpath) os.makedirs(destpath) os.chmod(destpath, chmod) except OSError as e: if not os.path.isdir(destpath): raise Errors.WafError("Cannot create the folder '%s' (error: %s)" % (path, e)) Build.BuildContext.INSTALL_DIR = INSTALL_DIR def INSTALL_DIRS(bld, destdir, dirs, chmod=0o755, env=None): '''install a set of directories''' destdir = bld.EXPAND_VARIABLES(destdir) dirs = bld.EXPAND_VARIABLES(dirs) for d in TO_LIST(dirs): INSTALL_DIR(bld, os.path.join(destdir, d), chmod, env) Build.BuildContext.INSTALL_DIRS = INSTALL_DIRS def MANPAGES(bld, manpages, install): '''build and install manual pages''' bld.env.MAN_XSL = 'http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl' for m in manpages.split(): source = m + '.xml' bld.SAMBA_GENERATOR(m, source=source, target=m, group='final', rule='${XSLTPROC} --xinclude -o ${TGT} --nonet ${MAN_XSL} ${SRC}' ) if install: bld.INSTALL_FILES('${MANDIR}/man%s' % m[-1], m, flat=True) Build.BuildContext.MANPAGES = MANPAGES def SAMBAMANPAGES(bld, manpages, extra_source=None): '''build and install manual pages''' bld.env.SAMBA_EXPAND_XSL = bld.srcnode.abspath() + '/docs-xml/xslt/expand-sambadoc.xsl' bld.env.SAMBA_MAN_XSL = bld.srcnode.abspath() + '/docs-xml/xslt/man.xsl' bld.env.SAMBA_CATALOG = 
bld.bldnode.abspath() + '/docs-xml/build/catalog.xml' bld.env.SAMBA_CATALOGS = 'file:///etc/xml/catalog file:///usr/local/share/xml/catalog file://' + bld.env.SAMBA_CATALOG for m in manpages.split(): source = m + '.xml' if extra_source is not None: source = [source, extra_source] bld.SAMBA_GENERATOR(m, source=source, target=m, group='final', dep_vars=['SAMBA_MAN_XSL', 'SAMBA_EXPAND_XSL', 'SAMBA_CATALOG'], rule='''XML_CATALOG_FILES="${SAMBA_CATALOGS}" export XML_CATALOG_FILES ${XSLTPROC} --xinclude --stringparam noreference 0 -o ${TGT}.xml --nonet ${SAMBA_EXPAND_XSL} ${SRC[0].abspath(env)} ${XSLTPROC} --nonet -o ${TGT} ${SAMBA_MAN_XSL} ${TGT}.xml''' ) bld.INSTALL_FILES('${MANDIR}/man%s' % m[-1], m, flat=True) Build.BuildContext.SAMBAMANPAGES = SAMBAMANPAGES @after('apply_link') @feature('cshlib') def apply_bundle_remove_dynamiclib_patch(self): if self.env['MACBUNDLE'] or getattr(self,'mac_bundle',False): if not getattr(self,'vnum',None): try: self.env['LINKFLAGS'].remove('-dynamiclib') self.env['LINKFLAGS'].remove('-single_module') except ValueError: pass tdb-1.4.2/buildtools/wafsamba/wscript0000660000000000000000000006366213527011454017621 0ustar rootroot00000000000000#!/usr/bin/env python # this is a base set of waf rules that everything else pulls in first import os, sys from waflib import Configure, Logs, Options, Utils, Context, Errors import wafsamba from samba_utils import os_path_relpath from optparse import SUPPRESS_HELP # this forces configure to be re-run if any of the configure # sections of the build scripts change. We have to check # for this in sys.argv as options have not yet been parsed when # we need to set this. This is off by default until some issues # are resolved related to WAFCACHE. It will need a lot of testing # before it is enabled by default. if '--enable-auto-reconfigure' in sys.argv: Configure.autoconfig = 'clobber' def default_value(option, default=''): if option in Options.options.__dict__: return Options.options.__dict__[option] return default def options(opt): opt.load('compiler_cc') opt.load('gnu_dirs') gr = opt.option_group('library handling options') gr.add_option('--bundled-libraries', help=("comma separated list of bundled libraries. May include !LIBNAME to disable bundling a library. Can be 'NONE' or 'ALL' [auto]"), action="store", dest='BUNDLED_LIBS', default='') gr.add_option('--private-libraries', help=("comma separated list of normally public libraries to build instead as private libraries. May include !LIBNAME to disable making a library private. 
Can be 'NONE' or 'ALL' [auto]"), action="store", dest='PRIVATE_LIBS', default='') extension_default = default_value('PRIVATE_EXTENSION_DEFAULT') gr.add_option('--private-library-extension', help=("name extension for private libraries [%s]" % extension_default), action="store", dest='PRIVATE_EXTENSION', default=extension_default) extension_exception = default_value('PRIVATE_EXTENSION_EXCEPTION') gr.add_option('--private-extension-exception', help=("comma separated list of libraries to not apply extension to [%s]" % extension_exception), action="store", dest='PRIVATE_EXTENSION_EXCEPTION', default=extension_exception) builtin_default = default_value('BUILTIN_LIBRARIES_DEFAULT') gr.add_option('--builtin-libraries', help=("command separated list of libraries to build directly into binaries [%s]" % builtin_default), action="store", dest='BUILTIN_LIBRARIES', default=builtin_default) gr.add_option('--minimum-library-version', help=("list of minimum system library versions (LIBNAME1:version,LIBNAME2:version)"), action="store", dest='MINIMUM_LIBRARY_VERSION', default='') gr.add_option('--disable-rpath', help=("Disable use of rpath for build binaries"), action="store_true", dest='disable_rpath_build', default=False) gr.add_option('--disable-rpath-install', help=("Disable use of rpath for library path in installed files"), action="store_true", dest='disable_rpath_install', default=False) gr.add_option('--disable-rpath-private-install', help=("Disable use of rpath for private library path in installed files"), action="store_true", dest='disable_rpath_private_install', default=False) gr.add_option('--nonshared-binary', help=("Disable use of shared libs for the listed binaries"), action="store", dest='NONSHARED_BINARIES', default='') gr.add_option('--disable-symbol-versions', help=("Disable use of the --version-script linker option"), action="store_true", dest='disable_symbol_versions', default=False) opt.add_option('--with-modulesdir', help=("modules directory [PREFIX/modules]"), action="store", dest='MODULESDIR', default='${PREFIX}/modules') opt.add_option('--with-privatelibdir', help=("private library directory [PREFIX/lib/%s]" % Context.g_module.APPNAME), action="store", dest='PRIVATELIBDIR', default=None) opt.add_option('--with-libiconv', help='additional directory to search for libiconv', action='store', dest='iconv_open', default='/usr/local', match = ['Checking for library iconv', 'Checking for iconv_open', 'Checking for header iconv.h']) opt.add_option('--without-gettext', help=("Disable use of gettext"), action="store_true", dest='disable_gettext', default=False) gr = opt.option_group('developer options') gr.add_option('-C', help='enable configure cacheing', action='store_true', dest='enable_configure_cache') gr.add_option('--enable-auto-reconfigure', help='enable automatic reconfigure on build', action='store_true', dest='enable_auto_reconfigure') gr.add_option('--enable-debug', help=("Turn on debugging symbols"), action="store_true", dest='debug', default=False) gr.add_option('--enable-developer', help=("Turn on developer warnings and debugging"), action="store_true", dest='developer', default=False) gr.add_option('--disable-warnings-as-errors', help=("Do not treat all warnings as errors (disable -Werror)"), action="store_true", dest='disable_warnings_as_errors', default=False) opt.add_option('--enable-coverage', help=("enable options necessary for code coverage " "reporting on selftest (default=no)"), action="store_true", dest='enable_coverage', default=False) gr.add_option('--fatal-errors', 
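    # Illustrative invocations for this developer options group (flags as
    # defined here):
    #   ./configure --enable-developer --abi-update
    #   ./configure --enable-coverage    # configure() below also turns ABI
    #                                    # checking off, since ld would fail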
help=("Stop compilation on first error (enable -Wfatal-errors)"), action="store_true", dest='fatal_errors', default=False) gr.add_option('--enable-gccdeps', help=("Enable use of gcc -MD dependency module"), action="store_true", dest='enable_gccdeps', default=True) gr.add_option('--pedantic', help=("Enable even more compiler warnings"), action='store_true', dest='pedantic', default=False) gr.add_option('--git-local-changes', help=("mark version with + if local git changes"), action='store_true', dest='GIT_LOCAL_CHANGES', default=False) gr.add_option('--address-sanitizer', help=("Enable address sanitizer compile and linker flags"), action="store_true", dest='address_sanitizer', default=False) gr.add_option('--undefined-sanitizer', help=("Enable undefined behaviour sanitizer compile and linker flags"), action="store_true", dest='undefined_sanitizer', default=False) gr.add_option('--enable-libfuzzer', help=("Build fuzzing binaries (requires compiler options for libFuzzer or compiler wrapper such as honggfuzz/hfuzz-cc)"), action="store_true", dest='enable_libfuzzer', default=False) gr.add_option('--abi-check', help=("Check ABI signatures for libraries"), action='store_true', dest='ABI_CHECK', default=False) gr.add_option('--abi-check-disable', help=("Disable ABI checking (used with --enable-developer)"), action='store_true', dest='ABI_CHECK_DISABLE', default=False) gr.add_option('--abi-update', help=("Update ABI signature files for libraries"), action='store_true', dest='ABI_UPDATE', default=False) gr.add_option('--show-deps', help=("Show dependency tree for the given target"), dest='SHOWDEPS', default='') gr.add_option('--symbol-check', help=("check symbols in object files against project rules"), action='store_true', dest='SYMBOLCHECK', default=False) gr.add_option('--dup-symbol-check', help=("check for duplicate symbols in object files and system libs (must be configured with --enable-developer)"), action='store_true', dest='DUP_SYMBOLCHECK', default=False) gr.add_option('--why-needed', help=("TARGET:DEPENDENCY check why TARGET needs DEPENDENCY"), action='store', type='str', dest='WHYNEEDED', default=None) gr.add_option('--show-duplicates', help=("Show objects which are included in multiple binaries or libraries"), action='store_true', dest='SHOW_DUPLICATES', default=False) gr = opt.add_option_group('cross compilation options') gr.add_option('--cross-compile', help=("configure for cross-compilation"), action='store_true', dest='CROSS_COMPILE', default=False) gr.add_option('--cross-execute', help=("command prefix to use for cross-execution in configure"), action='store', dest='CROSS_EXECUTE', default='') gr.add_option('--cross-answers', help=("answers to cross-compilation configuration (auto modified)"), action='store', dest='CROSS_ANSWERS', default='') gr.add_option('--hostcc', help=("set host compiler when cross compiling"), action='store', dest='HOSTCC', default=False) # we use SUPPRESS_HELP for these, as they are ignored, and are there only # to allow existing RPM spec files to work opt.add_option('--build', help=SUPPRESS_HELP, action='store', dest='AUTOCONF_BUILD', default='') opt.add_option('--host', help=SUPPRESS_HELP, action='store', dest='AUTOCONF_HOST', default='') opt.add_option('--target', help=SUPPRESS_HELP, action='store', dest='AUTOCONF_TARGET', default='') opt.add_option('--program-prefix', help=SUPPRESS_HELP, action='store', dest='AUTOCONF_PROGRAM_PREFIX', default='') opt.add_option('--disable-dependency-tracking', help=SUPPRESS_HELP, action='store_true', 
dest='AUTOCONF_DISABLE_DEPENDENCY_TRACKING', default=False) opt.add_option('--disable-silent-rules', help=SUPPRESS_HELP, action='store_true', dest='AUTOCONF_DISABLE_SILENT_RULES', default=False) gr = opt.option_group('dist options') gr.add_option('--sign-release', help='sign the release tarball created by waf dist', action='store_true', dest='SIGN_RELEASE') gr.add_option('--tag', help='tag release in git at the same time', type='string', action='store', dest='TAG_RELEASE') opt.add_option('--disable-python', help='do not generate python modules', action='store_true', dest='disable_python', default=False) @Utils.run_once def configure(conf): conf.env.hlist = [] conf.env.srcdir = conf.srcnode.abspath() conf.define('SRCDIR', conf.env['srcdir']) conf.SETUP_CONFIGURE_CACHE(Options.options.enable_configure_cache) # load our local waf extensions conf.load('gnu_dirs') conf.load('wafsamba') conf.CHECK_CC_ENV() conf.load('compiler_c') conf.CHECK_STANDARD_LIBPATH() # we need git for 'waf dist' conf.find_program('git', var='GIT') # older gcc versions (< 4.4) do not work with gccdeps, so we have to see if the .d file is generated if Options.options.enable_gccdeps: # stale file removal - the configuration may pick up the old .pyc file p = os.path.join(conf.env.srcdir, 'buildtools/wafsamba/gccdeps.pyc') if os.path.exists(p): os.remove(p) conf.load('gccdeps') # make the install paths available in environment conf.env.LIBDIR = Options.options.LIBDIR or '${PREFIX}/lib' conf.env.BINDIR = Options.options.BINDIR or '${PREFIX}/bin' conf.env.SBINDIR = Options.options.SBINDIR or '${PREFIX}/sbin' conf.env.MODULESDIR = Options.options.MODULESDIR conf.env.PRIVATELIBDIR = Options.options.PRIVATELIBDIR conf.env.BUNDLED_LIBS = Options.options.BUNDLED_LIBS.split(',') conf.env.SYSTEM_LIBS = () conf.env.PRIVATE_LIBS = Options.options.PRIVATE_LIBS.split(',') conf.env.BUILTIN_LIBRARIES = Options.options.BUILTIN_LIBRARIES.split(',') conf.env.NONSHARED_BINARIES = Options.options.NONSHARED_BINARIES.split(',') conf.env.PRIVATE_EXTENSION = Options.options.PRIVATE_EXTENSION conf.env.PRIVATE_EXTENSION_EXCEPTION = Options.options.PRIVATE_EXTENSION_EXCEPTION.split(',') conf.env.CROSS_COMPILE = Options.options.CROSS_COMPILE conf.env.CROSS_EXECUTE = Options.options.CROSS_EXECUTE conf.env.CROSS_ANSWERS = Options.options.CROSS_ANSWERS conf.env.HOSTCC = Options.options.HOSTCC conf.env.AUTOCONF_BUILD = Options.options.AUTOCONF_BUILD conf.env.AUTOCONF_HOST = Options.options.AUTOCONF_HOST conf.env.AUTOCONF_PROGRAM_PREFIX = Options.options.AUTOCONF_PROGRAM_PREFIX conf.env.disable_python = Options.options.disable_python if (conf.env.AUTOCONF_HOST and conf.env.AUTOCONF_BUILD and conf.env.AUTOCONF_BUILD != conf.env.AUTOCONF_HOST): Logs.error('ERROR: Mismatch between --build and --host. 
Please use --cross-compile instead') sys.exit(1) if conf.env.AUTOCONF_PROGRAM_PREFIX: Logs.error('ERROR: --program-prefix not supported') sys.exit(1) # enable ABI checking for developers conf.env.ABI_CHECK = Options.options.ABI_CHECK or Options.options.developer if Options.options.ABI_CHECK_DISABLE: conf.env.ABI_CHECK = False try: conf.find_program('gdb', mandatory=True) except: conf.env.ABI_CHECK = False conf.env.enable_coverage = Options.options.enable_coverage if conf.env.enable_coverage: conf.ADD_LDFLAGS('-lgcov', testflags=True) conf.ADD_CFLAGS('--coverage', testflags=True) # disable abi check for coverage, otherwise ld will fail conf.env.ABI_CHECK = False conf.env.GIT_LOCAL_CHANGES = Options.options.GIT_LOCAL_CHANGES conf.CHECK_UNAME() # see if we can compile and run a simple C program conf.CHECK_CODE('printf("hello world")', define='HAVE_SIMPLE_C_PROG', mandatory=True, execute=True, headers='stdio.h', msg='Checking simple C program') # Try to find the right extra flags for -Werror behaviour for f in ["-Werror", # GCC "-errwarn=%all", # Sun Studio "-qhalt=w", # IBM xlc "-w2", # Tru64 ]: if conf.CHECK_CFLAGS([f]): if not 'WERROR_CFLAGS' in conf.env: conf.env['WERROR_CFLAGS'] = [] conf.env['WERROR_CFLAGS'].extend([f]) break # check which compiler/linker flags are needed for rpath support if conf.CHECK_LDFLAGS(['-Wl,-rpath,.']): conf.env['RPATH_ST'] = '-Wl,-rpath,%s' elif conf.CHECK_LDFLAGS(['-Wl,-R,.']): conf.env['RPATH_ST'] = '-Wl,-R,%s' # check for rpath if conf.CHECK_LIBRARY_SUPPORT(rpath=True): support_rpath = True conf.env.RPATH_ON_BUILD = not Options.options.disable_rpath_build conf.env.RPATH_ON_INSTALL = (conf.env.RPATH_ON_BUILD and not Options.options.disable_rpath_install) if not conf.env.PRIVATELIBDIR: conf.env.PRIVATELIBDIR = '%s/%s' % (conf.env.LIBDIR, Context.g_module.APPNAME) conf.env.RPATH_ON_INSTALL_PRIVATE = ( not Options.options.disable_rpath_private_install) else: support_rpath = False conf.env.RPATH_ON_INSTALL = False conf.env.RPATH_ON_BUILD = False conf.env.RPATH_ON_INSTALL_PRIVATE = False if not conf.env.PRIVATELIBDIR: # rpath is not possible so there is no sense in having a # private library directory by default. # the user can of course always override it. 
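# Illustrative sketch (not wafsamba's implementation): the CHECK_CFLAGS /
# CHECK_LDFLAGS probes above boil down to compiling a trivial program with a
# candidate flag and keeping the first one that works. Assuming a 'cc'
# driver on PATH, a minimal standalone version could look like this:
import os, subprocess, tempfile

def probe_first_working_flag(candidates):
    # return the first flag that compiles a trivial program, else None
    src = tempfile.NamedTemporaryFile(suffix='.c', delete=False)
    src.write(b'int main(void) { return 0; }\n')
    src.close()
    exe = src.name + '.out'
    try:
        for flag in candidates:
            proc = subprocess.run(['cc', flag, src.name, '-o', exe],
                                  capture_output=True)
            if proc.returncode == 0:
                return flag
    finally:
        for path in (src.name, exe):
            if os.path.exists(path):
                os.remove(path)
    return None

# e.g. probe_first_working_flag(['-Werror', '-errwarn=%all']) picks '-Werror'
# with gcc/clang; wafsamba additionally caches the result and logs the probe
# to config.log.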
conf.env.PRIVATELIBDIR = conf.env.LIBDIR if (not Options.options.disable_symbol_versions and conf.CHECK_LIBRARY_SUPPORT(rpath=support_rpath, version_script=True, msg='-Wl,--version-script support')): conf.env.HAVE_LD_VERSION_SCRIPT = True else: conf.env.HAVE_LD_VERSION_SCRIPT = False if conf.CHECK_CFLAGS(['-fvisibility=hidden']): conf.env.VISIBILITY_CFLAGS = '-fvisibility=hidden' conf.CHECK_CODE('''int main(void) { return 0; } __attribute__((visibility("default"))) void vis_foo2(void) {}\n''', cflags=conf.env.VISIBILITY_CFLAGS, strict=True, define='HAVE_VISIBILITY_ATTR', addmain=False) # check HAVE_CONSTRUCTOR_ATTRIBUTE conf.CHECK_CODE(''' void test_constructor_attribute(void) __attribute__ ((constructor)); void test_constructor_attribute(void) { return; } int main(void) { return 0; } ''', 'HAVE_CONSTRUCTOR_ATTRIBUTE', addmain=False, strict=True, msg='Checking for library constructor support') # check HAVE_DESTRUCTOR_ATTRIBUTE conf.CHECK_CODE(''' void test_destructor_attribute(void) __attribute__ ((destructor)); void test_destructor_attribute(void) { return; } int main(void) { return 0; } ''', 'HAVE_DESTRUCTOR_ATTRIBUTE', addmain=False, strict=True, msg='Checking for library destructor support') conf.CHECK_CODE(''' void test_attribute(void) __attribute__ (()); void test_attribute(void) { return; } int main(void) { return 0; } ''', 'HAVE___ATTRIBUTE__', addmain=False, strict=True, msg='Checking for __attribute__') if sys.platform.startswith('aix'): conf.DEFINE('_ALL_SOURCE', 1, add_to_cflags=True) # Might not be needed if ALL_SOURCE is defined # conf.DEFINE('_XOPEN_SOURCE', 600, add_to_cflags=True) # we should use the PIC options in waf instead # Some compilers don't support -fPIC and just print a warning if conf.env['COMPILER_CC'] == "suncc": conf.ADD_CFLAGS('-KPIC', testflags=True) # we really want define here as we need to have this # define even during the tests otherwise detection of # boolean is broken conf.DEFINE('_STDC_C99', 1, add_to_cflags=True) conf.DEFINE('_XPG6', 1, add_to_cflags=True) else: conf.ADD_CFLAGS('-fPIC', testflags=True) # On Solaris 8 with suncc (at least) the flags for the linker to define the name of the # library are not always working (if the command line is very, very long and with a lot of # files) if conf.env['COMPILER_CC'] == "suncc": save = conf.env['SONAME_ST'] conf.env['SONAME_ST'] = '-Wl,-h,%s' if not conf.CHECK_SHLIB_INTRASINC_NAME_FLAGS("Checking if flags %s are ok" % conf.env['SONAME_ST']): conf.env['SONAME_ST'] = save conf.CHECK_INLINE() # check for pkgconfig conf.CHECK_CFG(atleast_pkgconfig_version='0.0.0') conf.DEFINE('_GNU_SOURCE', 1, add_to_cflags=True) conf.DEFINE('_XOPEN_SOURCE_EXTENDED', 1, add_to_cflags=True) # # Needs to be defined before std*.h and string*.h are included # As Python.h already brings string.h we need it in CFLAGS. 
# See memset_s() details here: # https://en.cppreference.com/w/c/string/byte/memset # if conf.CHECK_CFLAGS(['-D__STDC_WANT_LIB_EXT1__=1']): conf.ADD_CFLAGS('-D__STDC_WANT_LIB_EXT1__=1') # on Tru64 certain features are only available with _OSF_SOURCE set to 1 # and _XOPEN_SOURCE set to 600 if conf.env['SYSTEM_UNAME_SYSNAME'] == 'OSF1': conf.DEFINE('_OSF_SOURCE', 1, add_to_cflags=True) conf.DEFINE('_XOPEN_SOURCE', 600, add_to_cflags=True) # SCM_RIGHTS is only avail if _XOPEN_SOURCE is defined on IRIX if conf.env['SYSTEM_UNAME_SYSNAME'] == 'IRIX': conf.DEFINE('_XOPEN_SOURCE', 600, add_to_cflags=True) conf.DEFINE('_BSD_TYPES', 1, add_to_cflags=True) # Try to find the right extra flags for C99 initialisers for f in ["", "-AC99", "-qlanglvl=extc99", "-qlanglvl=stdc99", "-c99"]: if conf.CHECK_CFLAGS([f], ''' struct foo {int x;char y;}; struct foo bar = { .y = 'X', .x = 1 }; '''): if f != "": conf.ADD_CFLAGS(f) break # get the base headers we'll use for the rest of the tests conf.CHECK_HEADERS('stdio.h sys/types.h sys/stat.h stdlib.h stddef.h memory.h string.h', add_headers=True) conf.CHECK_HEADERS('strings.h inttypes.h stdint.h unistd.h minix/config.h', add_headers=True) conf.CHECK_HEADERS('ctype.h', add_headers=True) if sys.platform != 'darwin': conf.CHECK_HEADERS('standards.h', add_headers=True) conf.CHECK_HEADERS('stdbool.h stdint.h stdarg.h vararg.h', add_headers=True) conf.CHECK_HEADERS('limits.h assert.h') # see if we need special largefile flags if not conf.CHECK_LARGEFILE(): raise Errors.WafError('Samba requires large file support, but it is not available on this platform: sizeof(off_t) < 8') if conf.env.HAVE_STDDEF_H and conf.env.HAVE_STDLIB_H: conf.DEFINE('STDC_HEADERS', 1) conf.CHECK_HEADERS('sys/time.h time.h', together=True) if conf.env.HAVE_SYS_TIME_H and conf.env.HAVE_TIME_H: conf.DEFINE('TIME_WITH_SYS_TIME', 1) # cope with different extensions for libraries (root, ext) = os.path.splitext(conf.env.cshlib_PATTERN) if ext[0] == '.': conf.define('SHLIBEXT', ext[1:], quote=True) else: conf.define('SHLIBEXT', "so", quote=True) # First try a header check for cross-compile friendliness conf.CHECK_CODE(code = """#ifdef __BYTE_ORDER #define B __BYTE_ORDER #elif defined(BYTE_ORDER) #define B BYTE_ORDER #endif #ifdef __LITTLE_ENDIAN #define LITTLE __LITTLE_ENDIAN #elif defined(LITTLE_ENDIAN) #define LITTLE LITTLE_ENDIAN #endif #if !defined(LITTLE) || !defined(B) || LITTLE != B #error Not little endian. #endif int main(void) { return 0; }\n""", addmain=False, headers="endian.h sys/endian.h", define="HAVE_LITTLE_ENDIAN") conf.CHECK_CODE(code = """#ifdef __BYTE_ORDER #define B __BYTE_ORDER #elif defined(BYTE_ORDER) #define B BYTE_ORDER #endif #ifdef __BIG_ENDIAN #define BIG __BIG_ENDIAN #elif defined(BIG_ENDIAN) #define BIG BIG_ENDIAN #endif #if !defined(BIG) || !defined(B) || BIG != B #error Not big endian. #endif int main(void) { return 0; }\n""", addmain=False, headers="endian.h sys/endian.h", define="HAVE_BIG_ENDIAN") if not conf.CONFIG_SET("HAVE_BIG_ENDIAN") and not conf.CONFIG_SET("HAVE_LITTLE_ENDIAN"): # That didn't work! Do runtime test. conf.CHECK_CODE("""union { int i; char c[sizeof(int)]; } u; u.i = 0x01020304; return u.c[0] == 0x04 && u.c[1] == 0x03 && u.c[2] == 0x02 && u.c[3] == 0x01 ? 0 : 1;""", addmain=True, execute=True, define='HAVE_LITTLE_ENDIAN', msg="Checking for HAVE_LITTLE_ENDIAN - runtime") conf.CHECK_CODE("""union { int i; char c[sizeof(int)]; } u; u.i = 0x01020304; return u.c[0] == 0x01 && u.c[1] == 0x02 && u.c[2] == 0x03 && u.c[3] == 0x04 ? 
0 : 1;""", addmain=True, execute=True, define='HAVE_BIG_ENDIAN', msg="Checking for HAVE_BIG_ENDIAN - runtime") # Extra sanity check. if conf.CONFIG_SET("HAVE_BIG_ENDIAN") == conf.CONFIG_SET("HAVE_LITTLE_ENDIAN"): Logs.error("Failed endian determination. The PDP-11 is back?") sys.exit(1) else: if conf.CONFIG_SET("HAVE_BIG_ENDIAN"): conf.DEFINE('WORDS_BIGENDIAN', 1) # check if signal() takes a void function if conf.CHECK_CODE('return *(signal (0, 0)) (0) == 1', define='RETSIGTYPE_INT', execute=False, headers='signal.h', msg='Checking if signal handlers return int'): conf.DEFINE('RETSIGTYPE', 'int') else: conf.DEFINE('RETSIGTYPE', 'void') conf.CHECK_VARIABLE('__FUNCTION__', define='HAVE_FUNCTION_MACRO') conf.CHECK_CODE('va_list ap1,ap2; va_copy(ap1,ap2)', define="HAVE_VA_COPY", msg="Checking for va_copy") conf.CHECK_CODE(''' #define eprintf(...) fprintf(stderr, __VA_ARGS__) eprintf("bla", "bar") ''', define='HAVE__VA_ARGS__MACRO') conf.env.enable_libfuzzer = Options.options.enable_libfuzzer if conf.env.enable_libfuzzer: conf.DEFINE('ENABLE_LIBFUZZER', 1) conf.SAMBA_BUILD_ENV() def build(bld): # give a more useful message if the source directory has moved curdir = bld.path.abspath() srcdir = bld.srcnode.abspath() relpath = os_path_relpath(curdir, srcdir) if relpath.find('../') != -1: Logs.error('bld.path %s is not a child of %s' % (curdir, srcdir)) raise Errors.WafError('''The top source directory has moved. Please run distclean and reconfigure''') bld.SETUP_BUILD_GROUPS() bld.ENFORCE_GROUP_ORDERING() bld.CHECK_PROJECT_RULES() tdb-1.4.2/third_party/waf/waflib/Build.py0000660000000000000000000012470413527011455020221 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ Classes related to the build phase (build, clean, install, step, etc) The inheritance tree is the following: """ import os, sys, errno, re, shutil, stat try: import cPickle except ImportError: import pickle as cPickle from waflib import Node, Runner, TaskGen, Utils, ConfigSet, Task, Logs, Options, Context, Errors CACHE_DIR = 'c4che' """Name of the cache directory""" CACHE_SUFFIX = '_cache.py' """ConfigSet cache files for variants are written under :py:attr:´waflib.Build.CACHE_DIR´ in the form ´variant_name´_cache.py""" INSTALL = 1337 """Positive value '->' install, see :py:attr:`waflib.Build.BuildContext.is_install`""" UNINSTALL = -1337 """Negative value '<-' uninstall, see :py:attr:`waflib.Build.BuildContext.is_install`""" SAVED_ATTRS = 'root node_sigs task_sigs imp_sigs raw_deps node_deps'.split() """Build class members to save between the runs; these should be all dicts except for `root` which represents a :py:class:`waflib.Node.Node` instance """ CFG_FILES = 'cfg_files' """Files from the build directory to hash before starting the build (``config.h`` written during the configuration)""" POST_AT_ONCE = 0 """Post mode: all task generators are posted before any task executed""" POST_LAZY = 1 """Post mode: post the task generators group after group, the tasks in the next group are created when the tasks in the previous groups are done""" PROTOCOL = -1 if sys.platform == 'cli': PROTOCOL = 0 class BuildContext(Context.Context): '''executes the build''' cmd = 'build' variant = '' def __init__(self, **kw): super(BuildContext, self).__init__(**kw) self.is_install = 0 """Non-zero value when installing or uninstalling file""" self.top_dir = kw.get('top_dir', Context.top_dir) """See :py:attr:`waflib.Context.top_dir`; prefer :py:attr:`waflib.Build.BuildContext.srcnode`""" self.out_dir = 
kw.get('out_dir', Context.out_dir) """See :py:attr:`waflib.Context.out_dir`; prefer :py:attr:`waflib.Build.BuildContext.bldnode`""" self.run_dir = kw.get('run_dir', Context.run_dir) """See :py:attr:`waflib.Context.run_dir`""" self.launch_dir = Context.launch_dir """See :py:attr:`waflib.Context.out_dir`; prefer :py:meth:`waflib.Build.BuildContext.launch_node`""" self.post_mode = POST_LAZY """Whether to post the task generators at once or group-by-group (default is group-by-group)""" self.cache_dir = kw.get('cache_dir') if not self.cache_dir: self.cache_dir = os.path.join(self.out_dir, CACHE_DIR) self.all_envs = {} """Map names to :py:class:`waflib.ConfigSet.ConfigSet`, the empty string must map to the default environment""" # ======================================= # # cache variables self.node_sigs = {} """Dict mapping build nodes to task identifier (uid), it indicates whether a task created a particular file (persists across builds)""" self.task_sigs = {} """Dict mapping task identifiers (uid) to task signatures (persists across builds)""" self.imp_sigs = {} """Dict mapping task identifiers (uid) to implicit task dependencies used for scanning targets (persists across builds)""" self.node_deps = {} """Dict mapping task identifiers (uid) to node dependencies found by :py:meth:`waflib.Task.Task.scan` (persists across builds)""" self.raw_deps = {} """Dict mapping task identifiers (uid) to custom data returned by :py:meth:`waflib.Task.Task.scan` (persists across builds)""" self.task_gen_cache_names = {} self.jobs = Options.options.jobs """Amount of jobs to run in parallel""" self.targets = Options.options.targets """List of targets to build (default: \\*)""" self.keep = Options.options.keep """Whether the build should continue past errors""" self.progress_bar = Options.options.progress_bar """ Level of progress status: 0. normal output 1. progress bar 2. IDE output 3. No output at all """ # Manual dependencies. self.deps_man = Utils.defaultdict(list) """Manual dependencies set by :py:meth:`waflib.Build.BuildContext.add_manual_dependency`""" # just the structure here self.current_group = 0 """ Current build group """ self.groups = [] """ List containing lists of task generators """ self.group_names = {} """ Map group names to the group lists. See :py:meth:`waflib.Build.BuildContext.add_group` """ for v in SAVED_ATTRS: if not hasattr(self, v): setattr(self, v, {}) def get_variant_dir(self): """Getter for the variant_dir attribute""" if not self.variant: return self.out_dir return os.path.join(self.out_dir, os.path.normpath(self.variant)) variant_dir = property(get_variant_dir, None) def __call__(self, *k, **kw): """ Create a task generator and add it to the current build group. The following forms are equivalent:: def build(bld): tg = bld(a=1, b=2) def build(bld): tg = bld() tg.a = 1 tg.b = 2 def build(bld): tg = TaskGen.task_gen(a=1, b=2) bld.add_to_group(tg, None) :param group: group name to add the task generator to :type group: string """ kw['bld'] = self ret = TaskGen.task_gen(*k, **kw) self.task_gen_cache_names = {} # reset the cache, each time self.add_to_group(ret, group=kw.get('group')) return ret def __copy__(self): """ Build contexts cannot be copied :raises: :py:class:`waflib.Errors.WafError` """ raise Errors.WafError('build contexts cannot be copied') def load_envs(self): """ The configuration command creates files of the form ``build/c4che/NAMEcache.py``. 
This method creates a :py:class:`waflib.ConfigSet.ConfigSet` instance for each ``NAME`` by reading those files and stores them in :py:attr:`waflib.Build.BuildContext.all_envs`. """ node = self.root.find_node(self.cache_dir) if not node: raise Errors.WafError('The project was not configured: run "waf configure" first!') lst = node.ant_glob('**/*%s' % CACHE_SUFFIX, quiet=True) if not lst: raise Errors.WafError('The cache directory is empty: reconfigure the project') for x in lst: name = x.path_from(node).replace(CACHE_SUFFIX, '').replace('\\', '/') env = ConfigSet.ConfigSet(x.abspath()) self.all_envs[name] = env for f in env[CFG_FILES]: newnode = self.root.find_resource(f) if not newnode or not newnode.exists(): raise Errors.WafError('Missing configuration file %r, reconfigure the project!' % f) def init_dirs(self): """ Initialize the project directory and the build directory by creating the nodes :py:attr:`waflib.Build.BuildContext.srcnode` and :py:attr:`waflib.Build.BuildContext.bldnode` corresponding to ``top_dir`` and ``variant_dir`` respectively. The ``bldnode`` directory is created if necessary. """ if not (os.path.isabs(self.top_dir) and os.path.isabs(self.out_dir)): raise Errors.WafError('The project was not configured: run "waf configure" first!') self.path = self.srcnode = self.root.find_dir(self.top_dir) self.bldnode = self.root.make_node(self.variant_dir) self.bldnode.mkdir() def execute(self): """ Restore data from previous builds and call :py:meth:`waflib.Build.BuildContext.execute_build`. Overrides from :py:func:`waflib.Context.Context.execute` """ self.restore() if not self.all_envs: self.load_envs() self.execute_build() def execute_build(self): """ Execute the build by: * reading the scripts (see :py:meth:`waflib.Context.Context.recurse`) * calling :py:meth:`waflib.Build.BuildContext.pre_build` to call user build functions * calling :py:meth:`waflib.Build.BuildContext.compile` to process the tasks * calling :py:meth:`waflib.Build.BuildContext.post_build` to call user build functions """ Logs.info("Waf: Entering directory `%s'", self.variant_dir) self.recurse([self.run_dir]) self.pre_build() # display the time elapsed in the progress bar self.timer = Utils.Timer() try: self.compile() finally: if self.progress_bar == 1 and sys.stderr.isatty(): c = self.producer.processed or 1 m = self.progress_line(c, c, Logs.colors.BLUE, Logs.colors.NORMAL) Logs.info(m, extra={'stream': sys.stderr, 'c1': Logs.colors.cursor_off, 'c2' : Logs.colors.cursor_on}) Logs.info("Waf: Leaving directory `%s'", self.variant_dir) try: self.producer.bld = None del self.producer except AttributeError: pass self.post_build() def restore(self): """ Load data from a previous run, sets the attributes listed in :py:const:`waflib.Build.SAVED_ATTRS` """ try: env = ConfigSet.ConfigSet(os.path.join(self.cache_dir, 'build.config.py')) except EnvironmentError: pass else: if env.version < Context.HEXVERSION: raise Errors.WafError('Project was configured with a different version of Waf, please reconfigure it') for t in env.tools: self.setup(**t) dbfn = os.path.join(self.variant_dir, Context.DBFILE) try: data = Utils.readf(dbfn, 'rb') except (EnvironmentError, EOFError): # handle missing file/empty file Logs.debug('build: Could not load the build cache %s (missing)', dbfn) else: try: Node.pickle_lock.acquire() Node.Nod3 = self.node_class try: data = cPickle.loads(data) except Exception as e: Logs.debug('build: Could not pickle the build cache %s: %r', dbfn, e) else: for x in SAVED_ATTRS: setattr(self, x, data.get(x, {})) 
finally: Node.pickle_lock.release() self.init_dirs() def store(self): """ Store data for next runs, set the attributes listed in :py:const:`waflib.Build.SAVED_ATTRS`. Uses a temporary file to avoid problems on ctrl+c. """ data = {} for x in SAVED_ATTRS: data[x] = getattr(self, x) db = os.path.join(self.variant_dir, Context.DBFILE) try: Node.pickle_lock.acquire() Node.Nod3 = self.node_class x = cPickle.dumps(data, PROTOCOL) finally: Node.pickle_lock.release() Utils.writef(db + '.tmp', x, m='wb') try: st = os.stat(db) os.remove(db) if not Utils.is_win32: # win32 has no chown but we're paranoid os.chown(db + '.tmp', st.st_uid, st.st_gid) except (AttributeError, OSError): pass # do not use shutil.move (copy is not thread-safe) os.rename(db + '.tmp', db) def compile(self): """ Run the build by creating an instance of :py:class:`waflib.Runner.Parallel` The cache file is written when at least a task was executed. :raises: :py:class:`waflib.Errors.BuildError` in case the build fails """ Logs.debug('build: compile()') # delegate the producer-consumer logic to another object to reduce the complexity self.producer = Runner.Parallel(self, self.jobs) self.producer.biter = self.get_build_iterator() try: self.producer.start() except KeyboardInterrupt: if self.is_dirty(): self.store() raise else: if self.is_dirty(): self.store() if self.producer.error: raise Errors.BuildError(self.producer.error) def is_dirty(self): return self.producer.dirty def setup(self, tool, tooldir=None, funs=None): """ Import waf tools defined during the configuration:: def configure(conf): conf.load('glib2') def build(bld): pass # glib2 is imported implicitly :param tool: tool list :type tool: list :param tooldir: optional tool directory (sys.path) :type tooldir: list of string :param funs: unused variable """ if isinstance(tool, list): for i in tool: self.setup(i, tooldir) return module = Context.load_tool(tool, tooldir) if hasattr(module, "setup"): module.setup(self) def get_env(self): """Getter for the env property""" try: return self.all_envs[self.variant] except KeyError: return self.all_envs[''] def set_env(self, val): """Setter for the env property""" self.all_envs[self.variant] = val env = property(get_env, set_env) def add_manual_dependency(self, path, value): """ Adds a dependency from a node object to a value:: def build(bld): bld.add_manual_dependency( bld.path.find_resource('wscript'), bld.root.find_resource('/etc/fstab')) :param path: file path :type path: string or :py:class:`waflib.Node.Node` :param value: value to depend :type value: :py:class:`waflib.Node.Node`, byte object, or function returning a byte object """ if not path: raise ValueError('Invalid input path %r' % path) if isinstance(path, Node.Node): node = path elif os.path.isabs(path): node = self.root.find_resource(path) else: node = self.path.find_resource(path) if not node: raise ValueError('Could not find the path %r' % path) if isinstance(value, list): self.deps_man[node].extend(value) else: self.deps_man[node].append(value) def launch_node(self): """Returns the launch directory as a :py:class:`waflib.Node.Node` object (cached)""" try: # private cache return self.p_ln except AttributeError: self.p_ln = self.root.find_dir(self.launch_dir) return self.p_ln def hash_env_vars(self, env, vars_lst): """ Hashes configuration set variables:: def build(bld): bld.hash_env_vars(bld.env, ['CXX', 'CC']) This method uses an internal cache. 
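# Illustrative sketch (an assumption, not waf's exact code): hash_env_vars
# above amounts to hashing the selected values, memoized per ConfigSet and
# per variable list; waf's Utils.h_list plays the md5-over-repr role here.
import hashlib

_env_hash_cache = {}

def hash_env_vars_sketch(env, var_names):
    # env: any mapping; the cache key mirrors waf's str(id(env)) + str(vars)
    idx = str(id(env)) + str(var_names)
    try:
        return _env_hash_cache[idx]
    except KeyError:
        pass
    values = [env.get(name) for name in var_names]
    digest = hashlib.md5(repr(values).encode('utf-8')).digest()
    _env_hash_cache[idx] = digest
    return digest

# usage: hash_env_vars_sketch({'CC': 'gcc', 'CFLAGS': ['-O2']}, ['CC', 'CFLAGS'])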
:param env: Configuration Set :type env: :py:class:`waflib.ConfigSet.ConfigSet` :param vars_lst: list of variables :type vars_lst: list of string """ if not env.table: env = env.parent if not env: return Utils.SIG_NIL idx = str(id(env)) + str(vars_lst) try: cache = self.cache_env except AttributeError: cache = self.cache_env = {} else: try: return self.cache_env[idx] except KeyError: pass lst = [env[a] for a in vars_lst] cache[idx] = ret = Utils.h_list(lst) Logs.debug('envhash: %s %r', Utils.to_hex(ret), lst) return ret def get_tgen_by_name(self, name): """ Fetches a task generator by its name or its target attribute; the name must be unique in a build:: def build(bld): tg = bld(name='foo') tg == bld.get_tgen_by_name('foo') This method uses a private internal cache. :param name: Task generator name :raises: :py:class:`waflib.Errors.WafError` in case there is no task generator by that name """ cache = self.task_gen_cache_names if not cache: # create the index lazily for g in self.groups: for tg in g: try: cache[tg.name] = tg except AttributeError: # raised if not a task generator, which should be uncommon pass try: return cache[name] except KeyError: raise Errors.WafError('Could not find a task generator for the name %r' % name) def progress_line(self, idx, total, col1, col2): """ Computes a progress bar line displayed when running ``waf -p`` :returns: progress bar line :rtype: string """ if not sys.stderr.isatty(): return '' n = len(str(total)) Utils.rot_idx += 1 ind = Utils.rot_chr[Utils.rot_idx % 4] pc = (100. * idx)/total fs = "[%%%dd/%%d][%%s%%2d%%%%%%s][%s][" % (n, ind) left = fs % (idx, total, col1, pc, col2) right = '][%s%s%s]' % (col1, self.timer, col2) cols = Logs.get_term_cols() - len(left) - len(right) + 2*len(col1) + 2*len(col2) if cols < 7: cols = 7 ratio = ((cols * idx)//total) - 1 bar = ('='*ratio+'>').ljust(cols) msg = Logs.indicator % (left, bar, right) return msg def declare_chain(self, *k, **kw): """ Wraps :py:func:`waflib.TaskGen.declare_chain` for convenience """ return TaskGen.declare_chain(*k, **kw) def pre_build(self): """Executes user-defined methods before the build starts, see :py:meth:`waflib.Build.BuildContext.add_pre_fun`""" for m in getattr(self, 'pre_funs', []): m(self) def post_build(self): """Executes user-defined methods after the build is successful, see :py:meth:`waflib.Build.BuildContext.add_post_fun`""" for m in getattr(self, 'post_funs', []): m(self) def add_pre_fun(self, meth): """ Binds a callback method to execute after the scripts are read and before the build starts:: def mycallback(bld): print("Hello, world!") def build(bld): bld.add_pre_fun(mycallback) """ try: self.pre_funs.append(meth) except AttributeError: self.pre_funs = [meth] def add_post_fun(self, meth): """ Binds a callback method to execute immediately after the build is successful:: def call_ldconfig(bld): bld.exec_command('/sbin/ldconfig') def build(bld): if bld.cmd == 'install': bld.add_post_fun(call_ldconfig) """ try: self.post_funs.append(meth) except AttributeError: self.post_funs = [meth] def get_group(self, x): """ Returns the build group named `x`, or the current group if `x` is None :param x: name or number or None :type x: string, int or None """ if not self.groups: self.add_group() if x is None: return self.groups[self.current_group] if x in self.group_names: return self.group_names[x] return self.groups[x] def add_to_group(self, tgen, group=None): """Adds a task or a task generator to the build; there is no attempt to remove it if it was already added."""
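# Illustrative sketch of the lazy-index pattern behind get_tgen_by_name
# above (an assumption, simplified): build the name -> object map on the
# first lookup only, and drop it whenever a new task generator appears,
# exactly as __call__ resets task_gen_cache_names.
class LazyNameIndex(object):
    def __init__(self, groups):
        self.groups = groups  # list of lists of objects with a .name
        self.cache = {}

    def invalidate(self):
        # called whenever the set of objects changes
        self.cache = {}

    def lookup(self, name):
        if not self.cache:
            for group in self.groups:
                for tg in group:
                    tg_name = getattr(tg, 'name', None)
                    if tg_name:
                        self.cache[tg_name] = tg
        try:
            return self.cache[name]
        except KeyError:
            raise KeyError('no task generator named %r' % name)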
assert(isinstance(tgen, TaskGen.task_gen) or isinstance(tgen, Task.Task)) tgen.bld = self self.get_group(group).append(tgen) def get_group_name(self, g): """ Returns the name of the input build group :param g: build group object or build group index :type g: integer or list :return: name :rtype: string """ if not isinstance(g, list): g = self.groups[g] for x in self.group_names: if id(self.group_names[x]) == id(g): return x return '' def get_group_idx(self, tg): """ Returns the index of the group containing the task generator given as argument:: def build(bld): tg = bld(name='nada') 0 == bld.get_group_idx(tg) :param tg: Task generator object :type tg: :py:class:`waflib.TaskGen.task_gen` :rtype: int """ se = id(tg) for i, tmp in enumerate(self.groups): for t in tmp: if id(t) == se: return i return None def add_group(self, name=None, move=True): """ Adds a new group of tasks/task generators. By default the new group becomes the default group for new task generators (make sure to create build groups in order). :param name: name for this group :type name: string :param move: set this new group as default group (True by default) :type move: bool :raises: :py:class:`waflib.Errors.WafError` if a group by the name given already exists """ if name and name in self.group_names: raise Errors.WafError('add_group: name %s already present', name) g = [] self.group_names[name] = g self.groups.append(g) if move: self.current_group = len(self.groups) - 1 def set_group(self, idx): """ Sets the build group at position idx as current so that newly added task generators are added to this one by default:: def build(bld): bld(rule='touch ${TGT}', target='foo.txt') bld.add_group() # now the current group is 1 bld(rule='touch ${TGT}', target='bar.txt') bld.set_group(0) # now the current group is 0 bld(rule='touch ${TGT}', target='truc.txt') # build truc.txt before bar.txt :param idx: group name or group index :type idx: string or int """ if isinstance(idx, str): g = self.group_names[idx] for i, tmp in enumerate(self.groups): if id(g) == id(tmp): self.current_group = i break else: self.current_group = idx def total(self): """ Approximate task count: this value may be inaccurate if task generators are posted lazily (see :py:attr:`waflib.Build.BuildContext.post_mode`). The value :py:attr:`waflib.Runner.Parallel.total` is updated during the task execution. :rtype: int """ total = 0 for group in self.groups: for tg in group: try: total += len(tg.tasks) except AttributeError: total += 1 return total def get_targets(self): """ This method returns a pair containing the index of the last build group to post, and the list of task generator objects corresponding to the target names. This is used internally by :py:meth:`waflib.Build.BuildContext.get_build_iterator` to perform partial builds:: $ waf --targets=myprogram,myshlib :return: the minimum build group index, and list of task generators :rtype: tuple """ to_post = [] min_grp = 0 for name in self.targets.split(','): tg = self.get_tgen_by_name(name) m = self.get_group_idx(tg) if m > min_grp: min_grp = m to_post = [tg] elif m == min_grp: to_post.append(tg) return (min_grp, to_post) def get_all_task_gen(self): """ Returns a list of all task generators for troubleshooting purposes. 
""" lst = [] for g in self.groups: lst.extend(g) return lst def post_group(self): """ Post task generators from the group indexed by self.current_group; used internally by :py:meth:`waflib.Build.BuildContext.get_build_iterator` """ def tgpost(tg): try: f = tg.post except AttributeError: pass else: f() if self.targets == '*': for tg in self.groups[self.current_group]: tgpost(tg) elif self.targets: if self.current_group < self._min_grp: for tg in self.groups[self.current_group]: tgpost(tg) else: for tg in self._exact_tg: tg.post() else: ln = self.launch_node() if ln.is_child_of(self.bldnode): Logs.warn('Building from the build directory, forcing --targets=*') ln = self.srcnode elif not ln.is_child_of(self.srcnode): Logs.warn('CWD %s is not under %s, forcing --targets=* (run distclean?)', ln.abspath(), self.srcnode.abspath()) ln = self.srcnode def is_post(tg, ln): try: p = tg.path except AttributeError: pass else: if p.is_child_of(ln): return True def is_post_group(): for i, g in enumerate(self.groups): if i > self.current_group: for tg in g: if is_post(tg, ln): return True if self.post_mode == POST_LAZY and ln != self.srcnode: # partial folder builds require all targets from a previous build group if is_post_group(): ln = self.srcnode for tg in self.groups[self.current_group]: if is_post(tg, ln): tgpost(tg) def get_tasks_group(self, idx): """ Returns all task instances for the build group at position idx, used internally by :py:meth:`waflib.Build.BuildContext.get_build_iterator` :rtype: list of :py:class:`waflib.Task.Task` """ tasks = [] for tg in self.groups[idx]: try: tasks.extend(tg.tasks) except AttributeError: # not a task generator tasks.append(tg) return tasks def get_build_iterator(self): """ Creates a Python generator object that returns lists of tasks that may be processed in parallel. 
:return: tasks which can be executed immediately :rtype: generator returning lists of :py:class:`waflib.Task.Task` """ if self.targets and self.targets != '*': (self._min_grp, self._exact_tg) = self.get_targets() if self.post_mode != POST_LAZY: for self.current_group, _ in enumerate(self.groups): self.post_group() for self.current_group, _ in enumerate(self.groups): # first post the task generators for the group if self.post_mode != POST_AT_ONCE: self.post_group() # then extract the tasks tasks = self.get_tasks_group(self.current_group) # if the constraints are set properly (ext_in/ext_out, before/after) # the call to set_file_constraints may be removed (can be a 15% penalty on no-op rebuilds) # (but leave set_file_constraints for the installation step) # # if the tasks have only files, set_file_constraints is required but set_precedence_constraints is not necessary # Task.set_file_constraints(tasks) Task.set_precedence_constraints(tasks) self.cur_tasks = tasks if tasks: yield tasks while 1: # the build stops once there are no tasks to process yield [] def install_files(self, dest, files, **kw): """ Creates a task generator to install files on the system:: def build(bld): bld.install_files('${DATADIR}', self.path.find_resource('wscript')) :param dest: path representing the destination directory :type dest: :py:class:`waflib.Node.Node` or string (absolute path) :param files: input files :type files: list of strings or list of :py:class:`waflib.Node.Node` :param env: configuration set to expand *dest* :type env: :py:class:`waflib.ConfigSet.ConfigSet` :param relative_trick: preserve the folder hierarchy when installing whole folders :type relative_trick: bool :param cwd: parent node for searching srcfile, when srcfile is not an instance of :py:class:`waflib.Node.Node` :type cwd: :py:class:`waflib.Node.Node` :param postpone: execute the task immediately to perform the installation (False by default) :type postpone: bool """ assert(dest) tg = self(features='install_task', install_to=dest, install_from=files, **kw) tg.dest = tg.install_to tg.type = 'install_files' if not kw.get('postpone', True): tg.post() return tg def install_as(self, dest, srcfile, **kw): """ Creates a task generator to install a file on the system with a different name:: def build(bld): bld.install_as('${PREFIX}/bin', 'myapp', chmod=Utils.O755) :param dest: destination file :type dest: :py:class:`waflib.Node.Node` or string (absolute path) :param srcfile: input file :type srcfile: string or :py:class:`waflib.Node.Node` :param cwd: parent node for searching srcfile, when srcfile is not an instance of :py:class:`waflib.Node.Node` :type cwd: :py:class:`waflib.Node.Node` :param env: configuration set for performing substitutions in dest :type env: :py:class:`waflib.ConfigSet.ConfigSet` :param postpone: execute the task immediately to perform the installation (False by default) :type postpone: bool """ assert(dest) tg = self(features='install_task', install_to=dest, install_from=srcfile, **kw) tg.dest = tg.install_to tg.type = 'install_as' if not kw.get('postpone', True): tg.post() return tg def symlink_as(self, dest, src, **kw): """ Creates a task generator to install a symlink:: def build(bld): bld.symlink_as('${PREFIX}/lib/libfoo.so', 'libfoo.so.1.2.3') :param dest: absolute path of the symlink :type dest: :py:class:`waflib.Node.Node` or string (absolute path) :param src: link contents, which is a relative or absolute path which may exist or not :type src: string :param env: configuration set for performing substitutions in 
dest :type env: :py:class:`waflib.ConfigSet.ConfigSet` :param add: add the task created to a build group - set ``False`` only if the installation task is created after the build has started :type add: bool :param postpone: execute the task immediately to perform the installation :type postpone: bool :param relative_trick: make the symlink relative (default: ``False``) :type relative_trick: bool """ assert(dest) tg = self(features='install_task', install_to=dest, install_from=src, **kw) tg.dest = tg.install_to tg.type = 'symlink_as' tg.link = src # TODO if add: self.add_to_group(tsk) if not kw.get('postpone', True): tg.post() return tg @TaskGen.feature('install_task') @TaskGen.before_method('process_rule', 'process_source') def process_install_task(self): """Creates the installation task for the current task generator; uses :py:func:`waflib.Build.add_install_task` internally.""" self.add_install_task(**self.__dict__) @TaskGen.taskgen_method def add_install_task(self, **kw): """ Creates the installation task for the current task generator, and executes it immediately if necessary :returns: An installation task :rtype: :py:class:`waflib.Build.inst` """ if not self.bld.is_install: return if not kw['install_to']: return if kw['type'] == 'symlink_as' and Utils.is_win32: if kw.get('win32_install'): kw['type'] = 'install_as' else: # just exit return tsk = self.install_task = self.create_task('inst') tsk.chmod = kw.get('chmod', Utils.O644) tsk.link = kw.get('link', '') or kw.get('install_from', '') tsk.relative_trick = kw.get('relative_trick', False) tsk.type = kw['type'] tsk.install_to = tsk.dest = kw['install_to'] tsk.install_from = kw['install_from'] tsk.relative_base = kw.get('cwd') or kw.get('relative_base', self.path) tsk.install_user = kw.get('install_user') tsk.install_group = kw.get('install_group') tsk.init_files() if not kw.get('postpone', True): tsk.run_now() return tsk @TaskGen.taskgen_method def add_install_files(self, **kw): """ Creates an installation task for files :returns: An installation task :rtype: :py:class:`waflib.Build.inst` """ kw['type'] = 'install_files' return self.add_install_task(**kw) @TaskGen.taskgen_method def add_install_as(self, **kw): """ Creates an installation task for a single file :returns: An installation task :rtype: :py:class:`waflib.Build.inst` """ kw['type'] = 'install_as' return self.add_install_task(**kw) @TaskGen.taskgen_method def add_symlink_as(self, **kw): """ Creates an installation task for a symbolic link :returns: An installation task :rtype: :py:class:`waflib.Build.inst` """ kw['type'] = 'symlink_as' return self.add_install_task(**kw) class inst(Task.Task): """Task that installs files or symlinks; it is typically executed by :py:class:`waflib.Build.InstallContext` and :py:class:`waflib.Build.UnInstallContext`""" def __str__(self): """Returns an empty string to disable the standard task display""" return '' def uid(self): """Returns a unique identifier for the task""" lst = self.inputs + self.outputs + [self.link, self.generator.path.abspath()] return Utils.h_list(lst) def init_files(self): """ Initializes the task input and output nodes """ if self.type == 'symlink_as': inputs = [] else: inputs = self.generator.to_nodes(self.install_from) if self.type == 'install_as': assert len(inputs) == 1 self.set_inputs(inputs) dest = self.get_install_path() outputs = [] if self.type == 'symlink_as': if self.relative_trick: self.link = os.path.relpath(self.link, os.path.dirname(dest)) outputs.append(self.generator.bld.root.make_node(dest)) elif self.type 
== 'install_as': outputs.append(self.generator.bld.root.make_node(dest)) else: for y in inputs: if self.relative_trick: destfile = os.path.join(dest, y.path_from(self.relative_base)) else: destfile = os.path.join(dest, y.name) outputs.append(self.generator.bld.root.make_node(destfile)) self.set_outputs(outputs) def runnable_status(self): """ Installation tasks are always executed, so this method returns either :py:const:`waflib.Task.ASK_LATER` or :py:const:`waflib.Task.RUN_ME`. """ ret = super(inst, self).runnable_status() if ret == Task.SKIP_ME and self.generator.bld.is_install: return Task.RUN_ME return ret def post_run(self): """ Disables any post-run operations """ pass def get_install_path(self, destdir=True): """ Returns the destination path where files will be installed, pre-pending `destdir`. Relative paths will be interpreted relative to `PREFIX` if no `destdir` is given. :rtype: string """ if isinstance(self.install_to, Node.Node): dest = self.install_to.abspath() else: dest = os.path.normpath(Utils.subst_vars(self.install_to, self.env)) if not os.path.isabs(dest): dest = os.path.join(self.env.PREFIX, dest) if destdir and Options.options.destdir: dest = os.path.join(Options.options.destdir, os.path.splitdrive(dest)[1].lstrip(os.sep)) return dest def copy_fun(self, src, tgt): """ Copies a file from src to tgt, preserving permissions and trying to work around path limitations on Windows platforms. On Unix-like platforms, the owner/group of the target file may be set through install_user/install_group :param src: absolute path :type src: string :param tgt: absolute path :type tgt: string """ # override this if you want to strip executables # kw['tsk'].source is the task that created the files in the build if Utils.is_win32 and len(tgt) > 259 and not tgt.startswith('\\\\?\\'): tgt = '\\\\?\\' + tgt shutil.copy2(src, tgt) self.fix_perms(tgt) def rm_empty_dirs(self, tgt): """ Removes empty folders recursively when uninstalling. :param tgt: absolute path :type tgt: string """ while tgt: tgt = os.path.dirname(tgt) try: os.rmdir(tgt) except OSError: break def run(self): """ Performs file or symlink installation """ is_install = self.generator.bld.is_install if not is_install: # unnecessary? return for x in self.outputs: if is_install == INSTALL: x.parent.mkdir() if self.type == 'symlink_as': fun = is_install == INSTALL and self.do_link or self.do_unlink fun(self.link, self.outputs[0].abspath()) else: fun = is_install == INSTALL and self.do_install or self.do_uninstall launch_node = self.generator.bld.launch_node() for x, y in zip(self.inputs, self.outputs): fun(x.abspath(), y.abspath(), x.path_from(launch_node)) def run_now(self): """ Try executing the installation task right now :raises: :py:class:`waflib.Errors.TaskNotReady` """ status = self.runnable_status() if status not in (Task.RUN_ME, Task.SKIP_ME): raise Errors.TaskNotReady('Could not process %r: status %r' % (self, status)) self.run() self.hasrun = Task.SUCCESS def do_install(self, src, tgt, lbl, **kw): """ Copies a file from src to tgt with given file permissions. The actual copy is only performed if the source and target file sizes or timestamps differ. When the copy occurs, the file is always first removed and then copied so as to prevent stale inodes. 
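# Illustrative sketch of the install-copy policy described above
# (an assumption: no logging, chmod or --force handling):
import os, shutil

def install_file_sketch(src, tgt):
    # skip the copy when sizes match and the target is at least as new
    try:
        st_tgt, st_src = os.stat(tgt), os.stat(src)
        if (st_tgt.st_size == st_src.st_size
                and st_tgt.st_mtime + 2 >= st_src.st_mtime):
            return False
    except OSError:
        pass  # target (or source) missing: fall through and try the copy
    try:
        os.remove(tgt)  # remove first so a running binary keeps its inode
    except OSError:
        pass
    shutil.copy2(src, tgt)  # copy2 preserves permissions and timestamps
    return True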
:param src: file name as absolute path :type src: string :param tgt: file destination, as absolute path :type tgt: string :param lbl: file source description :type lbl: string :param chmod: installation mode :type chmod: int :raises: :py:class:`waflib.Errors.WafError` if the file cannot be written """ if not Options.options.force: # check if the file is already there to avoid a copy try: st1 = os.stat(tgt) st2 = os.stat(src) except OSError: pass else: # same size and identical timestamps -> make no copy if st1.st_mtime + 2 >= st2.st_mtime and st1.st_size == st2.st_size: if not self.generator.bld.progress_bar: c1 = Logs.colors.NORMAL c2 = Logs.colors.BLUE Logs.info('%s- install %s%s%s (from %s)', c1, c2, tgt, c1, lbl) return False if not self.generator.bld.progress_bar: c1 = Logs.colors.NORMAL c2 = Logs.colors.BLUE Logs.info('%s+ install %s%s%s (from %s)', c1, c2, tgt, c1, lbl) # Give best attempt at making destination overwritable, # like the 'install' utility used by 'make install' does. try: os.chmod(tgt, Utils.O644 | stat.S_IMODE(os.stat(tgt).st_mode)) except EnvironmentError: pass # following is for shared libs and stale inodes (-_-) try: os.remove(tgt) except OSError: pass try: self.copy_fun(src, tgt) except EnvironmentError as e: if not os.path.exists(src): Logs.error('File %r does not exist', src) elif not os.path.isfile(src): Logs.error('Input %r is not a file', src) raise Errors.WafError('Could not install the file %r' % tgt, e) def fix_perms(self, tgt): """ Change the ownership of the file/folder/link pointed by the given path This looks up for `install_user` or `install_group` attributes on the task or on the task generator:: def build(bld): bld.install_as('${PREFIX}/wscript', 'wscript', install_user='nobody', install_group='nogroup') bld.symlink_as('${PREFIX}/wscript_link', Utils.subst_vars('${PREFIX}/wscript', bld.env), install_user='nobody', install_group='nogroup') """ if not Utils.is_win32: user = getattr(self, 'install_user', None) or getattr(self.generator, 'install_user', None) group = getattr(self, 'install_group', None) or getattr(self.generator, 'install_group', None) if user or group: Utils.lchown(tgt, user or -1, group or -1) if not os.path.islink(tgt): os.chmod(tgt, self.chmod) def do_link(self, src, tgt, **kw): """ Creates a symlink from tgt to src. 
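# Illustrative sketch of the idempotent symlink install performed by
# do_link below (an assumption: POSIX only, no logging):
import os

def symlink_install_sketch(link_target, link_path):
    # no-op when the link already exists and points at the right place
    if os.path.islink(link_path) and os.readlink(link_path) == link_target:
        return False
    try:
        os.remove(link_path)  # replace whatever was there before
    except OSError:
        pass
    os.symlink(link_target, link_path)
    return True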
:param src: file name as absolute path :type src: string :param tgt: file destination, as absolute path :type tgt: string """ if os.path.islink(tgt) and os.readlink(tgt) == src: if not self.generator.bld.progress_bar: c1 = Logs.colors.NORMAL c2 = Logs.colors.BLUE Logs.info('%s- symlink %s%s%s (to %s)', c1, c2, tgt, c1, src) else: try: os.remove(tgt) except OSError: pass if not self.generator.bld.progress_bar: c1 = Logs.colors.NORMAL c2 = Logs.colors.BLUE Logs.info('%s+ symlink %s%s%s (to %s)', c1, c2, tgt, c1, src) os.symlink(src, tgt) self.fix_perms(tgt) def do_uninstall(self, src, tgt, lbl, **kw): """ See :py:meth:`waflib.Build.inst.do_install` """ if not self.generator.bld.progress_bar: c1 = Logs.colors.NORMAL c2 = Logs.colors.BLUE Logs.info('%s- remove %s%s%s', c1, c2, tgt, c1) #self.uninstall.append(tgt) try: os.remove(tgt) except OSError as e: if e.errno != errno.ENOENT: if not getattr(self, 'uninstall_error', None): self.uninstall_error = True Logs.warn('build: some files could not be uninstalled (retry with -vv to list them)') if Logs.verbose > 1: Logs.warn('Could not remove %s (error code %r)', e.filename, e.errno) self.rm_empty_dirs(tgt) def do_unlink(self, src, tgt, **kw): """ See :py:meth:`waflib.Build.inst.do_link` """ try: if not self.generator.bld.progress_bar: c1 = Logs.colors.NORMAL c2 = Logs.colors.BLUE Logs.info('%s- remove %s%s%s', c1, c2, tgt, c1) os.remove(tgt) except OSError: pass self.rm_empty_dirs(tgt) class InstallContext(BuildContext): '''installs the targets on the system''' cmd = 'install' def __init__(self, **kw): super(InstallContext, self).__init__(**kw) self.is_install = INSTALL class UninstallContext(InstallContext): '''removes the targets installed''' cmd = 'uninstall' def __init__(self, **kw): super(UninstallContext, self).__init__(**kw) self.is_install = UNINSTALL class CleanContext(BuildContext): '''cleans the project''' cmd = 'clean' def execute(self): """ See :py:func:`waflib.Build.BuildContext.execute`. """ self.restore() if not self.all_envs: self.load_envs() self.recurse([self.run_dir]) try: self.clean() finally: self.store() def clean(self): """ Remove most files from the build directory, and reset all caches. Custom lists of files to clean can be declared as `bld.clean_files`. For example, exclude `build/program/myprogram` from getting removed:: def build(bld): bld.clean_files = bld.bldnode.ant_glob('**', excl='.lock* config.log c4che/* config.h program/myprogram', quiet=True, generator=True) """ Logs.debug('build: clean called') if hasattr(self, 'clean_files'): for n in self.clean_files: n.delete() elif self.bldnode != self.srcnode: # would lead to a disaster if top == out lst = [] for env in self.all_envs.values(): lst.extend(self.root.find_or_declare(f) for f in env[CFG_FILES]) excluded_dirs = '.lock* *conf_check_*/** config.log %s/*' % CACHE_DIR for n in self.bldnode.ant_glob('**/*', excl=excluded_dirs, quiet=True): if n in lst: continue n.delete() self.root.children = {} for v in SAVED_ATTRS: if v == 'root': continue setattr(self, v, {}) class ListContext(BuildContext): '''lists the targets to execute''' cmd = 'list' def execute(self): """ In addition to printing the name of each build target, a description column will include text for each task generator which has a "description" field set. See :py:func:`waflib.Build.BuildContext.execute`. 
""" self.restore() if not self.all_envs: self.load_envs() self.recurse([self.run_dir]) self.pre_build() # display the time elapsed in the progress bar self.timer = Utils.Timer() for g in self.groups: for tg in g: try: f = tg.post except AttributeError: pass else: f() try: # force the cache initialization self.get_tgen_by_name('') except Errors.WafError: pass targets = sorted(self.task_gen_cache_names) # figure out how much to left-justify, for largest target name line_just = max(len(t) for t in targets) if targets else 0 for target in targets: tgen = self.task_gen_cache_names[target] # Support displaying the description for the target # if it was set on the tgen descript = getattr(tgen, 'description', '') if descript: target = target.ljust(line_just) descript = ': %s' % descript Logs.pprint('GREEN', target, label=descript) class StepContext(BuildContext): '''executes tasks in a step-by-step fashion, for debugging''' cmd = 'step' def __init__(self, **kw): super(StepContext, self).__init__(**kw) self.files = Options.options.files def compile(self): """ Overrides :py:meth:`waflib.Build.BuildContext.compile` to perform a partial build on tasks matching the input/output pattern given (regular expression matching):: $ waf step --files=foo.c,bar.c,in:truc.c,out:bar.o $ waf step --files=in:foo.cpp.1.o # link task only """ if not self.files: Logs.warn('Add a pattern for the debug build, for example "waf step --files=main.c,app"') BuildContext.compile(self) return targets = [] if self.targets and self.targets != '*': targets = self.targets.split(',') for g in self.groups: for tg in g: if targets and tg.name not in targets: continue try: f = tg.post except AttributeError: pass else: f() for pat in self.files.split(','): matcher = self.get_matcher(pat) for tg in g: if isinstance(tg, Task.Task): lst = [tg] else: lst = tg.tasks for tsk in lst: do_exec = False for node in tsk.inputs: if matcher(node, output=False): do_exec = True break for node in tsk.outputs: if matcher(node, output=True): do_exec = True break if do_exec: ret = tsk.run() Logs.info('%s -> exit %r', tsk, ret) def get_matcher(self, pat): """ Converts a step pattern into a function :param: pat: pattern of the form in:truc.c,out:bar.o :returns: Python function that uses Node objects as inputs and returns matches :rtype: function """ # this returns a function inn = True out = True if pat.startswith('in:'): out = False pat = pat.replace('in:', '') elif pat.startswith('out:'): inn = False pat = pat.replace('out:', '') anode = self.root.find_node(pat) pattern = None if not anode: if not pat.startswith('^'): pat = '^.+?%s' % pat if not pat.endswith('$'): pat = '%s$' % pat pattern = re.compile(pat) def match(node, output): if output and not out: return False if not output and not inn: return False if anode: return anode == node else: return pattern.match(node.abspath()) return match class EnvContext(BuildContext): """Subclass EnvContext to create commands that require configuration data in 'env'""" fun = cmd = None def execute(self): """ See :py:func:`waflib.Build.BuildContext.execute`. 
""" self.restore() if not self.all_envs: self.load_envs() self.recurse([self.run_dir]) tdb-1.4.2/third_party/waf/waflib/ConfigSet.py0000660000000000000000000002014613527011455021036 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ ConfigSet: a special dict The values put in :py:class:`ConfigSet` must be serializable (dicts, lists, strings) """ import copy, re, os from waflib import Logs, Utils re_imp = re.compile(r'^(#)*?([^#=]*?)\ =\ (.*?)$', re.M) class ConfigSet(object): """ A copy-on-write dict with human-readable serialized format. The serialization format is human-readable (python-like) and performed by using eval() and repr(). For high performance prefer pickle. Do not store functions as they are not serializable. The values can be accessed by attributes or by keys:: from waflib.ConfigSet import ConfigSet env = ConfigSet() env.FOO = 'test' env['FOO'] = 'test' """ __slots__ = ('table', 'parent') def __init__(self, filename=None): self.table = {} """ Internal dict holding the object values """ #self.parent = None if filename: self.load(filename) def __contains__(self, key): """ Enables the *in* syntax:: if 'foo' in env: print(env['foo']) """ if key in self.table: return True try: return self.parent.__contains__(key) except AttributeError: return False # parent may not exist def keys(self): """Dict interface""" keys = set() cur = self while cur: keys.update(cur.table.keys()) cur = getattr(cur, 'parent', None) keys = list(keys) keys.sort() return keys def __iter__(self): return iter(self.keys()) def __str__(self): """Text representation of the ConfigSet (for debugging purposes)""" return "\n".join(["%r %r" % (x, self.__getitem__(x)) for x in self.keys()]) def __getitem__(self, key): """ Dictionary interface: get value from key:: def configure(conf): conf.env['foo'] = {} print(env['foo']) """ try: while 1: x = self.table.get(key) if not x is None: return x self = self.parent except AttributeError: return [] def __setitem__(self, key, value): """ Dictionary interface: set value from key """ self.table[key] = value def __delitem__(self, key): """ Dictionary interface: mark the value as missing """ self[key] = [] def __getattr__(self, name): """ Attribute access provided for convenience. The following forms are equivalent:: def configure(conf): conf.env.value conf.env['value'] """ if name in self.__slots__: return object.__getattribute__(self, name) else: return self[name] def __setattr__(self, name, value): """ Attribute access provided for convenience. The following forms are equivalent:: def configure(conf): conf.env.value = x env['value'] = x """ if name in self.__slots__: object.__setattr__(self, name, value) else: self[name] = value def __delattr__(self, name): """ Attribute access provided for convenience. The following forms are equivalent:: def configure(conf): del env.value del env['value'] """ if name in self.__slots__: object.__delattr__(self, name) else: del self[name] def derive(self): """ Returns a new ConfigSet deriving from self. The copy returned will be a shallow copy:: from waflib.ConfigSet import ConfigSet env = ConfigSet() env.append_value('CFLAGS', ['-O2']) child = env.derive() child.CFLAGS.append('test') # warning! this will modify 'env' child.CFLAGS = ['-O3'] # new list, ok child.append_value('CFLAGS', ['-O3']) # ok Use :py:func:`ConfigSet.detach` to detach the child from the parent. 
""" newenv = ConfigSet() newenv.parent = self return newenv def detach(self): """ Detaches this instance from its parent (if present) Modifying the parent :py:class:`ConfigSet` will not change the current object Modifying this :py:class:`ConfigSet` will not modify the parent one. """ tbl = self.get_merged_dict() try: delattr(self, 'parent') except AttributeError: pass else: keys = tbl.keys() for x in keys: tbl[x] = copy.deepcopy(tbl[x]) self.table = tbl return self def get_flat(self, key): """ Returns a value as a string. If the input is a list, the value returned is space-separated. :param key: key to use :type key: string """ s = self[key] if isinstance(s, str): return s return ' '.join(s) def _get_list_value_for_modification(self, key): """ Returns a list value for further modification. The list may be modified inplace and there is no need to do this afterwards:: self.table[var] = value """ try: value = self.table[key] except KeyError: try: value = self.parent[key] except AttributeError: value = [] else: if isinstance(value, list): # force a copy value = value[:] else: value = [value] self.table[key] = value else: if not isinstance(value, list): self.table[key] = value = [value] return value def append_value(self, var, val): """ Appends a value to the specified config key:: def build(bld): bld.env.append_value('CFLAGS', ['-O2']) The value must be a list or a tuple """ if isinstance(val, str): # if there were string everywhere we could optimize this val = [val] current_value = self._get_list_value_for_modification(var) current_value.extend(val) def prepend_value(self, var, val): """ Prepends a value to the specified item:: def configure(conf): conf.env.prepend_value('CFLAGS', ['-O2']) The value must be a list or a tuple """ if isinstance(val, str): val = [val] self.table[var] = val + self._get_list_value_for_modification(var) def append_unique(self, var, val): """ Appends a value to the specified item only if it's not already present:: def build(bld): bld.env.append_unique('CFLAGS', ['-O2', '-g']) The value must be a list or a tuple """ if isinstance(val, str): val = [val] current_value = self._get_list_value_for_modification(var) for x in val: if x not in current_value: current_value.append(x) def get_merged_dict(self): """ Computes the merged dictionary from the fusion of self and all its parent :rtype: a ConfigSet object """ table_list = [] env = self while 1: table_list.insert(0, env.table) try: env = env.parent except AttributeError: break merged_table = {} for table in table_list: merged_table.update(table) return merged_table def store(self, filename): """ Serializes the :py:class:`ConfigSet` data to a file. See :py:meth:`ConfigSet.load` for reading such files. :param filename: file to use :type filename: string """ try: os.makedirs(os.path.split(filename)[0]) except OSError: pass buf = [] merged_table = self.get_merged_dict() keys = list(merged_table.keys()) keys.sort() try: fun = ascii except NameError: fun = repr for k in keys: if k != 'undo_stack': buf.append('%s = %s\n' % (k, fun(merged_table[k]))) Utils.writef(filename, ''.join(buf)) def load(self, filename): """ Restores contents from a file (current values are not cleared). Files are written using :py:meth:`ConfigSet.store`. 
:param filename: file to use :type filename: string """ tbl = self.table code = Utils.readf(filename, m='r') for m in re_imp.finditer(code): g = m.group tbl[g(2)] = eval(g(3)) Logs.debug('env: %s', self.table) def update(self, d): """ Dictionary interface: replace values with the ones from another dict :param d: object to use the value from :type d: dict-like object """ self.table.update(d) def stash(self): """ Stores the object state to provide transactionality semantics:: env = ConfigSet() env.stash() try: env.append_value('CFLAGS', '-O3') call_some_method(env) finally: env.revert() The history is kept in a stack, and is lost during the serialization by :py:meth:`ConfigSet.store` """ orig = self.table tbl = self.table = self.table.copy() for x in tbl.keys(): tbl[x] = copy.deepcopy(tbl[x]) self.undo_stack = self.undo_stack + [orig] def commit(self): """ Commits transactional changes. See :py:meth:`ConfigSet.stash` """ self.undo_stack.pop(-1) def revert(self): """ Reverts the object to a previous state. See :py:meth:`ConfigSet.stash` """ self.table = self.undo_stack.pop(-1) tdb-1.4.2/third_party/waf/waflib/Configure.py0000660000000000000000000004443513527011455021105 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ Configuration system A :py:class:`waflib.Configure.ConfigurationContext` instance is created when ``waf configure`` is called, it is used to: * create data dictionaries (ConfigSet instances) * store the list of modules to import * hold configuration routines such as ``find_program``, etc """ import os, re, shlex, shutil, sys, time, traceback from waflib import ConfigSet, Utils, Options, Logs, Context, Build, Errors WAF_CONFIG_LOG = 'config.log' """Name of the configuration log file""" autoconfig = False """Execute the configuration automatically""" conf_template = '''# project %(app)s configured on %(now)s by # waf %(wafver)s (abi %(abi)s, python %(pyver)x on %(systype)s) # using %(args)s #''' class ConfigurationContext(Context.Context): '''configures the project''' cmd = 'configure' error_handlers = [] """ Additional functions to handle configuration errors """ def __init__(self, **kw): super(ConfigurationContext, self).__init__(**kw) self.environ = dict(os.environ) self.all_envs = {} self.top_dir = None self.out_dir = None self.tools = [] # tools loaded in the configuration, and that will be loaded when building self.hash = 0 self.files = [] self.tool_cache = [] self.setenv('') def setenv(self, name, env=None): """ Set a new config set for conf.env. If a config set of that name already exists, recall it without modification. The name is the filename prefix to save to ``c4che/NAME_cache.py``, and it is also used as *variants* by the build commands. 
Though related to variants, whatever kind of data may be stored in the config set:: def configure(cfg): cfg.env.ONE = 1 cfg.setenv('foo') cfg.env.ONE = 2 def build(bld): 2 == bld.env_of_name('foo').ONE :param name: name of the configuration set :type name: string :param env: ConfigSet to copy, or an empty ConfigSet is created :type env: :py:class:`waflib.ConfigSet.ConfigSet` """ if name not in self.all_envs or env: if not env: env = ConfigSet.ConfigSet() self.prepare_env(env) else: env = env.derive() self.all_envs[name] = env self.variant = name def get_env(self): """Getter for the env property""" return self.all_envs[self.variant] def set_env(self, val): """Setter for the env property""" self.all_envs[self.variant] = val env = property(get_env, set_env) def init_dirs(self): """ Initialize the project directory and the build directory """ top = self.top_dir if not top: top = Options.options.top if not top: top = getattr(Context.g_module, Context.TOP, None) if not top: top = self.path.abspath() top = os.path.abspath(top) self.srcnode = (os.path.isabs(top) and self.root or self.path).find_dir(top) assert(self.srcnode) out = self.out_dir if not out: out = Options.options.out if not out: out = getattr(Context.g_module, Context.OUT, None) if not out: out = Options.lockfile.replace('.lock-waf_%s_' % sys.platform, '').replace('.lock-waf', '') # someone can be messing with symlinks out = os.path.realpath(out) self.bldnode = (os.path.isabs(out) and self.root or self.path).make_node(out) self.bldnode.mkdir() if not os.path.isdir(self.bldnode.abspath()): self.fatal('Could not create the build directory %s' % self.bldnode.abspath()) def execute(self): """ See :py:func:`waflib.Context.Context.execute` """ self.init_dirs() self.cachedir = self.bldnode.make_node(Build.CACHE_DIR) self.cachedir.mkdir() path = os.path.join(self.bldnode.abspath(), WAF_CONFIG_LOG) self.logger = Logs.make_logger(path, 'cfg') app = getattr(Context.g_module, 'APPNAME', '') if app: ver = getattr(Context.g_module, 'VERSION', '') if ver: app = "%s (%s)" % (app, ver) params = {'now': time.ctime(), 'pyver': sys.hexversion, 'systype': sys.platform, 'args': " ".join(sys.argv), 'wafver': Context.WAFVERSION, 'abi': Context.ABI, 'app': app} self.to_log(conf_template % params) self.msg('Setting top to', self.srcnode.abspath()) self.msg('Setting out to', self.bldnode.abspath()) if id(self.srcnode) == id(self.bldnode): Logs.warn('Setting top == out') elif id(self.path) != id(self.srcnode): if self.srcnode.is_child_of(self.path): Logs.warn('Are you certain that you do not want to set top="." ?') super(ConfigurationContext, self).execute() self.store() Context.top_dir = self.srcnode.abspath() Context.out_dir = self.bldnode.abspath() # this will write a configure lock so that subsequent builds will # consider the current path as the root directory (see prepare_impl). 
# to remove: use 'waf distclean' env = ConfigSet.ConfigSet() env.argv = sys.argv env.options = Options.options.__dict__ env.config_cmd = self.cmd env.run_dir = Context.run_dir env.top_dir = Context.top_dir env.out_dir = Context.out_dir # conf.hash & conf.files hold wscript files paths and hash # (used only by Configure.autoconfig) env.hash = self.hash env.files = self.files env.environ = dict(self.environ) env.launch_dir = Context.launch_dir if not (self.env.NO_LOCK_IN_RUN or env.environ.get('NO_LOCK_IN_RUN') or getattr(Options.options, 'no_lock_in_run')): env.store(os.path.join(Context.run_dir, Options.lockfile)) if not (self.env.NO_LOCK_IN_TOP or env.environ.get('NO_LOCK_IN_TOP') or getattr(Options.options, 'no_lock_in_top')): env.store(os.path.join(Context.top_dir, Options.lockfile)) if not (self.env.NO_LOCK_IN_OUT or env.environ.get('NO_LOCK_IN_OUT') or getattr(Options.options, 'no_lock_in_out')): env.store(os.path.join(Context.out_dir, Options.lockfile)) def prepare_env(self, env): """ Insert *PREFIX*, *BINDIR* and *LIBDIR* values into ``env`` :type env: :py:class:`waflib.ConfigSet.ConfigSet` :param env: a ConfigSet, usually ``conf.env`` """ if not env.PREFIX: if Options.options.prefix or Utils.is_win32: env.PREFIX = Options.options.prefix else: env.PREFIX = '/' if not env.BINDIR: if Options.options.bindir: env.BINDIR = Options.options.bindir else: env.BINDIR = Utils.subst_vars('${PREFIX}/bin', env) if not env.LIBDIR: if Options.options.libdir: env.LIBDIR = Options.options.libdir else: env.LIBDIR = Utils.subst_vars('${PREFIX}/lib%s' % Utils.lib64(), env) def store(self): """Save the config results into the cache file""" n = self.cachedir.make_node('build.config.py') n.write('version = 0x%x\ntools = %r\n' % (Context.HEXVERSION, self.tools)) if not self.all_envs: self.fatal('nothing to store in the configuration context!') for key in self.all_envs: tmpenv = self.all_envs[key] tmpenv.store(os.path.join(self.cachedir.abspath(), key + Build.CACHE_SUFFIX)) def load(self, tool_list, tooldir=None, funs=None, with_sys_path=True, cache=False): """ Load Waf tools, which will be imported whenever a build is started. 
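For example, in a configuration function (the tool names are only illustrative)::

	def configure(conf):
		conf.load('compiler_c')
		conf.load(['gnu_dirs'], cache=True)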
:param tool_list: waf tools to import :type tool_list: list of string :param tooldir: paths for the imports :type tooldir: list of string :param funs: functions to execute from the waf tools :type funs: list of string :param cache: whether to prevent the tool from running twice :type cache: bool """ tools = Utils.to_list(tool_list) if tooldir: tooldir = Utils.to_list(tooldir) for tool in tools: # avoid loading the same tool more than once with the same functions # used by composite projects if cache: mag = (tool, id(self.env), tooldir, funs) if mag in self.tool_cache: self.to_log('(tool %s is already loaded, skipping)' % tool) continue self.tool_cache.append(mag) module = None try: module = Context.load_tool(tool, tooldir, ctx=self, with_sys_path=with_sys_path) except ImportError as e: self.fatal('Could not load the Waf tool %r from %r\n%s' % (tool, getattr(e, 'waf_sys_path', sys.path), e)) except Exception as e: self.to_log('imp %r (%r & %r)' % (tool, tooldir, funs)) self.to_log(traceback.format_exc()) raise if funs is not None: self.eval_rules(funs) else: func = getattr(module, 'configure', None) if func: if type(func) is type(Utils.readf): func(self) else: self.eval_rules(func) self.tools.append({'tool':tool, 'tooldir':tooldir, 'funs':funs}) def post_recurse(self, node): """ Records the path and a hash of the scripts visited, see :py:meth:`waflib.Context.Context.post_recurse` :param node: script :type node: :py:class:`waflib.Node.Node` """ super(ConfigurationContext, self).post_recurse(node) self.hash = Utils.h_list((self.hash, node.read('rb'))) self.files.append(node.abspath()) def eval_rules(self, rules): """ Execute configuration tests provided as list of functions to run :param rules: list of configuration method names :type rules: list of string """ self.rules = Utils.to_list(rules) for x in self.rules: f = getattr(self, x) if not f: self.fatal('No such configuration function %r' % x) f() def conf(f): """ Decorator: attach new configuration functions to :py:class:`waflib.Build.BuildContext` and :py:class:`waflib.Configure.ConfigurationContext`. The methods bound will accept a parameter named 'mandatory' to disable the configuration errors:: def configure(conf): conf.find_program('abc', mandatory=False) :param f: method to bind :type f: function """ def fun(*k, **kw): mandatory = kw.pop('mandatory', True) try: return f(*k, **kw) except Errors.ConfigurationError: if mandatory: raise fun.__name__ = f.__name__ setattr(ConfigurationContext, f.__name__, fun) setattr(Build.BuildContext, f.__name__, fun) return f @conf def add_os_flags(self, var, dest=None, dup=False): """ Import operating system environment values into ``conf.env`` dict:: def configure(conf): conf.add_os_flags('CFLAGS') :param var: variable to use :type var: string :param dest: destination variable, by default the same as var :type dest: string :param dup: add the same set of flags again :type dup: bool """ try: flags = shlex.split(self.environ[var]) except KeyError: return if dup or ''.join(flags) not in ''.join(Utils.to_list(self.env[dest or var])): self.env.append_value(dest or var, flags) @conf def cmd_to_list(self, cmd): """ Detect if a command is written in pseudo shell like ``ccache g++`` and return a list. :param cmd: command :type cmd: a string or a list of string """ if isinstance(cmd, str): if os.path.isfile(cmd): # do not take any risk return [cmd] if os.sep == '/': return shlex.split(cmd) else: try: return shlex.split(cmd, posix=False) except TypeError: # Python 2.5 on windows? 
return shlex.split(cmd) return cmd @conf def check_waf_version(self, mini='1.9.99', maxi='2.1.0', **kw): """ Raise a Configuration error if the Waf version does not strictly match the given bounds:: conf.check_waf_version(mini='1.9.99', maxi='2.1.0') :type mini: number, tuple or string :param mini: Minimum required version :type maxi: number, tuple or string :param maxi: Maximum allowed version """ self.start_msg('Checking for waf version in %s-%s' % (str(mini), str(maxi)), **kw) ver = Context.HEXVERSION if Utils.num2ver(mini) > ver: self.fatal('waf version should be at least %r (%r found)' % (Utils.num2ver(mini), ver)) if Utils.num2ver(maxi) < ver: self.fatal('waf version should be at most %r (%r found)' % (Utils.num2ver(maxi), ver)) self.end_msg('ok', **kw) @conf def find_file(self, filename, path_list=[]): """ Find a file in a list of paths :param filename: name of the file to search for :param path_list: list of directories to search :return: the first matching filename; else a configuration exception is raised """ for n in Utils.to_list(filename): for d in Utils.to_list(path_list): p = os.path.expanduser(os.path.join(d, n)) if os.path.exists(p): return p self.fatal('Could not find %r' % filename) @conf def find_program(self, filename, **kw): """ Search for a program on the operating system When var is used, you may set os.environ[var] to help find a specific program version, for example:: $ CC='ccache gcc' waf configure :param path_list: paths to use for searching :type param_list: list of string :param var: store the result to conf.env[var] where var defaults to filename.upper() if not provided; the result is stored as a list of strings :type var: string :param value: obtain the program from the value passed exclusively :type value: list or string (list is preferred) :param exts: list of extensions for the binary (do not add an extension for portability) :type exts: list of string :param msg: name to display in the log, by default filename is used :type msg: string :param interpreter: interpreter for the program :type interpreter: ConfigSet variable key :raises: :py:class:`waflib.Errors.ConfigurationError` """ exts = kw.get('exts', Utils.is_win32 and '.exe,.com,.bat,.cmd' or ',.sh,.pl,.py') environ = kw.get('environ', getattr(self, 'environ', os.environ)) ret = '' filename = Utils.to_list(filename) msg = kw.get('msg', ', '.join(filename)) var = kw.get('var', '') if not var: var = re.sub(r'[-.]', '_', filename[0].upper()) path_list = kw.get('path_list', '') if path_list: path_list = Utils.to_list(path_list) else: path_list = environ.get('PATH', '').split(os.pathsep) if kw.get('value'): # user-provided in command-line options and passed to find_program ret = self.cmd_to_list(kw['value']) elif environ.get(var): # user-provided in the os environment ret = self.cmd_to_list(environ[var]) elif self.env[var]: # a default option in the wscript file ret = self.cmd_to_list(self.env[var]) else: if not ret: ret = self.find_binary(filename, exts.split(','), path_list) if not ret and Utils.winreg: ret = Utils.get_registry_app_path(Utils.winreg.HKEY_CURRENT_USER, filename) if not ret and Utils.winreg: ret = Utils.get_registry_app_path(Utils.winreg.HKEY_LOCAL_MACHINE, filename) ret = self.cmd_to_list(ret) if ret: if len(ret) == 1: retmsg = ret[0] else: retmsg = ret else: retmsg = False self.msg('Checking for program %r' % msg, retmsg, **kw) if not kw.get('quiet'): self.to_log('find program=%r paths=%r var=%r -> %r' % (filename, path_list, var, ret)) if not ret: self.fatal(kw.get('errmsg', '') or 
'Could not find the program %r' % filename) interpreter = kw.get('interpreter') if interpreter is None: if not Utils.check_exe(ret[0], env=environ): self.fatal('Program %r is not executable' % ret) self.env[var] = ret else: self.env[var] = self.env[interpreter] + ret return ret @conf def find_binary(self, filenames, exts, paths): for f in filenames: for ext in exts: exe_name = f + ext if os.path.isabs(exe_name): if os.path.isfile(exe_name): return exe_name else: for path in paths: x = os.path.expanduser(os.path.join(path, exe_name)) if os.path.isfile(x): return x return None @conf def run_build(self, *k, **kw): """ Create a temporary build context to execute a build. A reference to that build context is kept on self.test_bld for debugging purposes, and you should not rely on it too much (read the note on the cache below). The parameters given in the arguments to this function are passed as arguments for a single task generator created in the build. Only three parameters are obligatory: :param features: features to pass to a task generator created in the build :type features: list of string :param compile_filename: file to create for the compilation (default: *test.c*) :type compile_filename: string :param code: code to write in the filename to compile :type code: string Though this function returns *0* by default, the build may set an attribute named *retval* on the build context object to return a particular value. See :py:func:`waflib.Tools.c_config.test_exec_fun` for example. This function also provides a limited cache. To use it, provide the following option:: def options(opt): opt.add_option('--confcache', dest='confcache', default=0, action='count', help='Use a configuration cache') And execute the configuration with the following command-line:: $ waf configure --confcache """ lst = [str(v) for (p, v) in kw.items() if p != 'env'] h = Utils.h_list(lst) dir = self.bldnode.abspath() + os.sep + (not Utils.is_win32 and '.' 
or '') + 'conf_check_' + Utils.to_hex(h) try: os.makedirs(dir) except OSError: pass try: os.stat(dir) except OSError: self.fatal('cannot use the configuration test folder %r' % dir) cachemode = getattr(Options.options, 'confcache', None) if cachemode == 1: try: proj = ConfigSet.ConfigSet(os.path.join(dir, 'cache_run_build')) except EnvironmentError: pass else: ret = proj['cache_run_build'] if isinstance(ret, str) and ret.startswith('Test does not build'): self.fatal(ret) return ret bdir = os.path.join(dir, 'testbuild') if not os.path.exists(bdir): os.makedirs(bdir) cls_name = kw.get('run_build_cls') or getattr(self, 'run_build_cls', 'build') self.test_bld = bld = Context.create_context(cls_name, top_dir=dir, out_dir=bdir) bld.init_dirs() bld.progress_bar = 0 bld.targets = '*' bld.logger = self.logger bld.all_envs.update(self.all_envs) # not really necessary bld.env = kw['env'] bld.kw = kw bld.conf = self kw['build_fun'](bld) ret = -1 try: try: bld.compile() except Errors.WafError: ret = 'Test does not build: %s' % traceback.format_exc() self.fatal(ret) else: ret = getattr(bld, 'retval', 0) finally: if cachemode == 1: # cache the results each time proj = ConfigSet.ConfigSet() proj['cache_run_build'] = ret proj.store(os.path.join(dir, 'cache_run_build')) else: shutil.rmtree(dir) return ret @conf def ret_msg(self, msg, args): if isinstance(msg, str): return msg return msg(args) @conf def test(self, *k, **kw): if not 'env' in kw: kw['env'] = self.env.derive() # validate_c for example if kw.get('validate'): kw['validate'](kw) self.start_msg(kw['msg'], **kw) ret = None try: ret = self.run_build(*k, **kw) except self.errors.ConfigurationError: self.end_msg(kw['errmsg'], 'YELLOW', **kw) if Logs.verbose > 1: raise else: self.fatal('The configuration failed') else: kw['success'] = ret if kw.get('post_check'): ret = kw['post_check'](kw) if ret: self.end_msg(kw['errmsg'], 'YELLOW', **kw) self.fatal('The configuration failed %r' % ret) else: self.end_msg(self.ret_msg(kw['okmsg'], kw), **kw) return ret tdb-1.4.2/third_party/waf/waflib/Context.py0000660000000000000000000005104513527011455020603 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2010-2018 (ita) """ Classes and functions enabling the command system """ import os, re, imp, sys from waflib import Utils, Errors, Logs import waflib.Node # the following 3 constants are updated on each new release (do not touch) HEXVERSION=0x2001100 """Constant updated on new releases""" WAFVERSION="2.0.17" """Constant updated on new releases""" WAFREVISION="6bc6cb599c702e985780e9f705b291b812123693" """Git revision when the waf version is updated""" ABI = 20 """Version of the build data cache file format (used in :py:const:`waflib.Context.DBFILE`)""" DBFILE = '.wafpickle-%s-%d-%d' % (sys.platform, sys.hexversion, ABI) """Name of the pickle file for storing the build data""" APPNAME = 'APPNAME' """Default application name (used by ``waf dist``)""" VERSION = 'VERSION' """Default application version (used by ``waf dist``)""" TOP = 'top' """The variable name for the top-level directory in wscript files""" OUT = 'out' """The variable name for the output directory in wscript files""" WSCRIPT_FILE = 'wscript' """Name of the waf script files""" launch_dir = '' """Directory from which waf has been called""" run_dir = '' """Location of the wscript file to use as the entry point""" top_dir = '' """Location of the project directory (top), if the project was configured""" out_dir = '' """Location of the build directory (out), if the project 
was configured""" waf_dir = '' """Directory containing the waf modules""" default_encoding = Utils.console_encoding() """Encoding to use when reading outputs from other processes""" g_module = None """ Module representing the top-level wscript file (see :py:const:`waflib.Context.run_dir`) """ STDOUT = 1 STDERR = -1 BOTH = 0 classes = [] """ List of :py:class:`waflib.Context.Context` subclasses that can be used as waf commands. The classes are added automatically by a metaclass. """ def create_context(cmd_name, *k, **kw): """ Returns a new :py:class:`waflib.Context.Context` instance corresponding to the given command. Used in particular by :py:func:`waflib.Scripting.run_command` :param cmd_name: command name :type cmd_name: string :param k: arguments to give to the context class initializer :type k: list :param k: keyword arguments to give to the context class initializer :type k: dict :return: Context object :rtype: :py:class:`waflib.Context.Context` """ for x in classes: if x.cmd == cmd_name: return x(*k, **kw) ctx = Context(*k, **kw) ctx.fun = cmd_name return ctx class store_context(type): """ Metaclass that registers command classes into the list :py:const:`waflib.Context.classes` Context classes must provide an attribute 'cmd' representing the command name, and a function attribute 'fun' representing the function name that the command uses. """ def __init__(cls, name, bases, dct): super(store_context, cls).__init__(name, bases, dct) name = cls.__name__ if name in ('ctx', 'Context'): return try: cls.cmd except AttributeError: raise Errors.WafError('Missing command for the context class %r (cmd)' % name) if not getattr(cls, 'fun', None): cls.fun = cls.cmd classes.insert(0, cls) ctx = store_context('ctx', (object,), {}) """Base class for all :py:class:`waflib.Context.Context` classes""" class Context(ctx): """ Default context for waf commands, and base class for new command contexts. Context objects are passed to top-level functions:: def foo(ctx): print(ctx.__class__.__name__) # waflib.Context.Context Subclasses must define the class attributes 'cmd' and 'fun': :param cmd: command to execute as in ``waf cmd`` :type cmd: string :param fun: function name to execute when the command is called :type fun: string .. inheritance-diagram:: waflib.Context.Context waflib.Build.BuildContext waflib.Build.InstallContext waflib.Build.UninstallContext waflib.Build.StepContext waflib.Build.ListContext waflib.Configure.ConfigurationContext waflib.Scripting.Dist waflib.Scripting.DistCheck waflib.Build.CleanContext """ errors = Errors """ Shortcut to :py:mod:`waflib.Errors` provided for convenience """ tools = {} """ A module cache for wscript files; see :py:meth:`Context.Context.load` """ def __init__(self, **kw): try: rd = kw['run_dir'] except KeyError: rd = run_dir # binds the context to the nodes in use to avoid a context singleton self.node_class = type('Nod3', (waflib.Node.Node,), {}) self.node_class.__module__ = 'waflib.Node' self.node_class.ctx = self self.root = self.node_class('', None) self.cur_script = None self.path = self.root.find_dir(rd) self.stack_path = [] self.exec_dict = {'ctx':self, 'conf':self, 'bld':self, 'opt':self} self.logger = None def finalize(self): """ Called to free resources such as logger files """ try: logger = self.logger except AttributeError: pass else: Logs.free_logger(logger) delattr(self, 'logger') def load(self, tool_list, *k, **kw): """ Loads a Waf tool as a module, and try calling the function named :py:const:`waflib.Context.Context.fun` from it. 
A ``tooldir`` argument may be provided as a list of module paths. :param tool_list: list of Waf tool names to load :type tool_list: list of string or space-separated string """ tools = Utils.to_list(tool_list) path = Utils.to_list(kw.get('tooldir', '')) with_sys_path = kw.get('with_sys_path', True) for t in tools: module = load_tool(t, path, with_sys_path=with_sys_path) fun = getattr(module, kw.get('name', self.fun), None) if fun: fun(self) def execute(self): """ Here, it calls the function name in the top-level wscript file. Most subclasses redefine this method to provide additional functionality. """ self.recurse([os.path.dirname(g_module.root_path)]) def pre_recurse(self, node): """ Method executed immediately before a folder is read by :py:meth:`waflib.Context.Context.recurse`. The current script is bound as a Node object on ``self.cur_script``, and the current path is bound to ``self.path`` :param node: script :type node: :py:class:`waflib.Node.Node` """ self.stack_path.append(self.cur_script) self.cur_script = node self.path = node.parent def post_recurse(self, node): """ Restores ``self.cur_script`` and ``self.path`` right after :py:meth:`waflib.Context.Context.recurse` terminates. :param node: script :type node: :py:class:`waflib.Node.Node` """ self.cur_script = self.stack_path.pop() if self.cur_script: self.path = self.cur_script.parent def recurse(self, dirs, name=None, mandatory=True, once=True, encoding=None): """ Runs user-provided functions from the supplied list of directories. The directories can be either absolute, or relative to the directory of the wscript file The methods :py:meth:`waflib.Context.Context.pre_recurse` and :py:meth:`waflib.Context.Context.post_recurse` are called immediately before and after a script has been executed. 
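For example, assuming hypothetical sub-folders ``libs`` and ``tools`` that each contain a wscript file::

	def build(bld):
		bld.recurse(['libs', 'tools'])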
:param dirs: List of directories to visit :type dirs: list of string or space-separated string :param name: Name of function to invoke from the wscript :type name: string :param mandatory: whether sub wscript files are required to exist :type mandatory: bool :param once: read the script file once for a particular context :type once: bool """ try: cache = self.recurse_cache except AttributeError: cache = self.recurse_cache = {} for d in Utils.to_list(dirs): if not os.path.isabs(d): # absolute paths only d = os.path.join(self.path.abspath(), d) WSCRIPT = os.path.join(d, WSCRIPT_FILE) WSCRIPT_FUN = WSCRIPT + '_' + (name or self.fun) node = self.root.find_node(WSCRIPT_FUN) if node and (not once or node not in cache): cache[node] = True self.pre_recurse(node) try: function_code = node.read('r', encoding) exec(compile(function_code, node.abspath(), 'exec'), self.exec_dict) finally: self.post_recurse(node) elif not node: node = self.root.find_node(WSCRIPT) tup = (node, name or self.fun) if node and (not once or tup not in cache): cache[tup] = True self.pre_recurse(node) try: wscript_module = load_module(node.abspath(), encoding=encoding) user_function = getattr(wscript_module, (name or self.fun), None) if not user_function: if not mandatory: continue raise Errors.WafError('No function %r defined in %s' % (name or self.fun, node.abspath())) user_function(self) finally: self.post_recurse(node) elif not node: if not mandatory: continue try: os.listdir(d) except OSError: raise Errors.WafError('Cannot read the folder %r' % d) raise Errors.WafError('No wscript file in directory %s' % d) def log_command(self, cmd, kw): if Logs.verbose: fmt = os.environ.get('WAF_CMD_FORMAT') if fmt == 'string': if not isinstance(cmd, str): cmd = Utils.shell_escape(cmd) Logs.debug('runner: %r', cmd) Logs.debug('runner_env: kw=%s', kw) def exec_command(self, cmd, **kw): """ Runs an external process and returns the exit status:: def run(tsk): ret = tsk.generator.bld.exec_command('touch foo.txt') return ret If the context has the attribute 'log', then captures and logs the process stderr/stdout. Unlike :py:meth:`waflib.Context.Context.cmd_and_log`, this method does not return the stdout/stderr values captured. :param cmd: command argument for subprocess.Popen :type cmd: string or list :param kw: keyword arguments for subprocess.Popen. The parameters input/timeout will be passed to wait/communicate. :type kw: dict :returns: process exit status :rtype: integer :raises: :py:class:`waflib.Errors.WafError` if an invalid executable is specified for a non-shell process :raises: :py:class:`waflib.Errors.WafError` in case of execution failure """ subprocess = Utils.subprocess kw['shell'] = isinstance(cmd, str) self.log_command(cmd, kw) if self.logger: self.logger.info(cmd) if 'stdout' not in kw: kw['stdout'] = subprocess.PIPE if 'stderr' not in kw: kw['stderr'] = subprocess.PIPE if Logs.verbose and not kw['shell'] and not Utils.check_exe(cmd[0]): raise Errors.WafError('Program %s not found!' 
% cmd[0]) cargs = {} if 'timeout' in kw: if sys.hexversion >= 0x3030000: cargs['timeout'] = kw['timeout'] if not 'start_new_session' in kw: kw['start_new_session'] = True del kw['timeout'] if 'input' in kw: if kw['input']: cargs['input'] = kw['input'] kw['stdin'] = subprocess.PIPE del kw['input'] if 'cwd' in kw: if not isinstance(kw['cwd'], str): kw['cwd'] = kw['cwd'].abspath() encoding = kw.pop('decode_as', default_encoding) try: ret, out, err = Utils.run_process(cmd, kw, cargs) except Exception as e: raise Errors.WafError('Execution failure: %s' % str(e), ex=e) if out: if not isinstance(out, str): out = out.decode(encoding, errors='replace') if self.logger: self.logger.debug('out: %s', out) else: Logs.info(out, extra={'stream':sys.stdout, 'c1': ''}) if err: if not isinstance(err, str): err = err.decode(encoding, errors='replace') if self.logger: self.logger.error('err: %s' % err) else: Logs.info(err, extra={'stream':sys.stderr, 'c1': ''}) return ret def cmd_and_log(self, cmd, **kw): """ Executes a process and returns stdout/stderr if the execution is successful. An exception is thrown when the exit status is non-0. In that case, both stderr and stdout will be bound to the WafError object (configuration tests):: def configure(conf): out = conf.cmd_and_log(['echo', 'hello'], output=waflib.Context.STDOUT, quiet=waflib.Context.BOTH) (out, err) = conf.cmd_and_log(['echo', 'hello'], output=waflib.Context.BOTH) (out, err) = conf.cmd_and_log(cmd, input='\\n'.encode(), output=waflib.Context.STDOUT) try: conf.cmd_and_log(['which', 'someapp'], output=waflib.Context.BOTH) except Errors.WafError as e: print(e.stdout, e.stderr) :param cmd: args for subprocess.Popen :type cmd: list or string :param kw: keyword arguments for subprocess.Popen. The parameters input/timeout will be passed to wait/communicate. :type kw: dict :returns: a tuple containing the contents of stdout and stderr :rtype: string :raises: :py:class:`waflib.Errors.WafError` if an invalid executable is specified for a non-shell process :raises: :py:class:`waflib.Errors.WafError` in case of execution failure; stdout/stderr/returncode are bound to the exception object """ subprocess = Utils.subprocess kw['shell'] = isinstance(cmd, str) self.log_command(cmd, kw) quiet = kw.pop('quiet', None) to_ret = kw.pop('output', STDOUT) if Logs.verbose and not kw['shell'] and not Utils.check_exe(cmd[0]): raise Errors.WafError('Program %r not found!' 
% cmd[0]) kw['stdout'] = kw['stderr'] = subprocess.PIPE if quiet is None: self.to_log(cmd) cargs = {} if 'timeout' in kw: if sys.hexversion >= 0x3030000: cargs['timeout'] = kw['timeout'] if not 'start_new_session' in kw: kw['start_new_session'] = True del kw['timeout'] if 'input' in kw: if kw['input']: cargs['input'] = kw['input'] kw['stdin'] = subprocess.PIPE del kw['input'] if 'cwd' in kw: if not isinstance(kw['cwd'], str): kw['cwd'] = kw['cwd'].abspath() encoding = kw.pop('decode_as', default_encoding) try: ret, out, err = Utils.run_process(cmd, kw, cargs) except Exception as e: raise Errors.WafError('Execution failure: %s' % str(e), ex=e) if not isinstance(out, str): out = out.decode(encoding, errors='replace') if not isinstance(err, str): err = err.decode(encoding, errors='replace') if out and quiet != STDOUT and quiet != BOTH: self.to_log('out: %s' % out) if err and quiet != STDERR and quiet != BOTH: self.to_log('err: %s' % err) if ret: e = Errors.WafError('Command %r returned %r' % (cmd, ret)) e.returncode = ret e.stderr = err e.stdout = out raise e if to_ret == BOTH: return (out, err) elif to_ret == STDERR: return err return out def fatal(self, msg, ex=None): """ Prints an error message in red and stops command execution; this is usually used in the configuration section:: def configure(conf): conf.fatal('a requirement is missing') :param msg: message to display :type msg: string :param ex: optional exception object :type ex: exception :raises: :py:class:`waflib.Errors.ConfigurationError` """ if self.logger: self.logger.info('from %s: %s' % (self.path.abspath(), msg)) try: logfile = self.logger.handlers[0].baseFilename except AttributeError: pass else: if os.environ.get('WAF_PRINT_FAILURE_LOG'): # see #1930 msg = 'Log from (%s):\n%s\n' % (logfile, Utils.readf(logfile)) else: msg = '%s\n(complete log in %s)' % (msg, logfile) raise self.errors.ConfigurationError(msg, ex=ex) def to_log(self, msg): """ Logs information to the logger (if present), or to stderr. Empty messages are not printed:: def build(bld): bld.to_log('starting the build') Provide a logger on the context class or override this method if necessary. :param msg: message :type msg: string """ if not msg: return if self.logger: self.logger.info(msg) else: sys.stderr.write(str(msg)) sys.stderr.flush() def msg(self, *k, **kw): """ Prints a configuration message of the form ``msg: result``. The second part of the message will be in colors. The output can be disabled easly by setting ``in_msg`` to a positive value:: def configure(conf): self.in_msg = 1 conf.msg('Checking for library foo', 'ok') # no output :param msg: message to display to the user :type msg: string :param result: result to display :type result: string or boolean :param color: color to use, see :py:const:`waflib.Logs.colors_lst` :type color: string """ try: msg = kw['msg'] except KeyError: msg = k[0] self.start_msg(msg, **kw) try: result = kw['result'] except KeyError: result = k[1] color = kw.get('color') if not isinstance(color, str): color = result and 'GREEN' or 'YELLOW' self.end_msg(result, color, **kw) def start_msg(self, *k, **kw): """ Prints the beginning of a 'Checking for xxx' message. 
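It is normally paired with :py:meth:`waflib.Context.Context.end_msg`, as in this sketch::

	def configure(conf):
		conf.start_msg('Checking for foo')
		conf.end_msg('ok')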
See :py:meth:`waflib.Context.Context.msg` """ if kw.get('quiet'): return msg = kw.get('msg') or k[0] try: if self.in_msg: self.in_msg += 1 return except AttributeError: self.in_msg = 0 self.in_msg += 1 try: self.line_just = max(self.line_just, len(msg)) except AttributeError: self.line_just = max(40, len(msg)) for x in (self.line_just * '-', msg): self.to_log(x) Logs.pprint('NORMAL', "%s :" % msg.ljust(self.line_just), sep='') def end_msg(self, *k, **kw): """Prints the end of a 'Checking for' message. See :py:meth:`waflib.Context.Context.msg`""" if kw.get('quiet'): return self.in_msg -= 1 if self.in_msg: return result = kw.get('result') or k[0] defcolor = 'GREEN' if result is True: msg = 'ok' elif not result: msg = 'not found' defcolor = 'YELLOW' else: msg = str(result) self.to_log(msg) try: color = kw['color'] except KeyError: if len(k) > 1 and k[1] in Logs.colors_lst: # compatibility waf 1.7 color = k[1] else: color = defcolor Logs.pprint(color, msg) def load_special_tools(self, var, ban=[]): """ Loads third-party extensions modules for certain programming languages by trying to list certain files in the extras/ directory. This method is typically called once for a programming language group, see for example :py:mod:`waflib.Tools.compiler_c` :param var: glob expression, for example 'cxx\\_\\*.py' :type var: string :param ban: list of exact file names to exclude :type ban: list of string """ if os.path.isdir(waf_dir): lst = self.root.find_node(waf_dir).find_node('waflib/extras').ant_glob(var) for x in lst: if not x.name in ban: load_tool(x.name.replace('.py', '')) else: from zipfile import PyZipFile waflibs = PyZipFile(waf_dir) lst = waflibs.namelist() for x in lst: if not re.match('waflib/extras/%s' % var.replace('*', '.*'), var): continue f = os.path.basename(x) doban = False for b in ban: r = b.replace('*', '.*') if re.match(r, f): doban = True if not doban: f = f.replace('.py', '') load_tool(f) cache_modules = {} """ Dictionary holding already loaded modules (wscript), indexed by their absolute path. The modules are added automatically by :py:func:`waflib.Context.load_module` """ def load_module(path, encoding=None): """ Loads a wscript file as a python module. 
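For example (the path is hypothetical)::

	from waflib import Context
	module = Context.load_module('/project/wscript')
	module.build         # a function defined in that wscript file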
This method caches results in :py:attr:`waflib.Context.cache_modules` :param path: file path :type path: string :return: Loaded Python module :rtype: module """ try: return cache_modules[path] except KeyError: pass module = imp.new_module(WSCRIPT_FILE) try: code = Utils.readf(path, m='r', encoding=encoding) except EnvironmentError: raise Errors.WafError('Could not read the file %r' % path) module_dir = os.path.dirname(path) sys.path.insert(0, module_dir) try: exec(compile(code, path, 'exec'), module.__dict__) finally: sys.path.remove(module_dir) cache_modules[path] = module return module def load_tool(tool, tooldir=None, ctx=None, with_sys_path=True): """ Imports a Waf tool as a python module, and stores it in the dict :py:const:`waflib.Context.Context.tools` :type tool: string :param tool: Name of the tool :type tooldir: list :param tooldir: List of directories to search for the tool module :type with_sys_path: boolean :param with_sys_path: whether or not to search the regular sys.path, besides waf_dir and potentially given tooldirs """ if tool == 'java': tool = 'javaw' # jython else: tool = tool.replace('++', 'xx') if not with_sys_path: back_path = sys.path sys.path = [] try: if tooldir: assert isinstance(tooldir, list) sys.path = tooldir + sys.path try: __import__(tool) except ImportError as e: e.waf_sys_path = list(sys.path) raise finally: for d in tooldir: sys.path.remove(d) ret = sys.modules[tool] Context.tools[tool] = ret return ret else: if not with_sys_path: sys.path.insert(0, waf_dir) try: for x in ('waflib.Tools.%s', 'waflib.extras.%s', 'waflib.%s', '%s'): try: __import__(x % tool) break except ImportError: x = None else: # raise an exception __import__(tool) except ImportError as e: e.waf_sys_path = list(sys.path) raise finally: if not with_sys_path: sys.path.remove(waf_dir) ret = sys.modules[x % tool] Context.tools[tool] = ret return ret finally: if not with_sys_path: sys.path += back_path tdb-1.4.2/third_party/waf/waflib/Errors.py0000660000000000000000000000326113444661622020435 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2010-2018 (ita) """ Exceptions used in the Waf code """ import traceback, sys class WafError(Exception): """Base class for all Waf errors""" def __init__(self, msg='', ex=None): """ :param msg: error message :type msg: string :param ex: exception causing this error (optional) :type ex: exception """ Exception.__init__(self) self.msg = msg assert not isinstance(msg, Exception) self.stack = [] if ex: if not msg: self.msg = str(ex) if isinstance(ex, WafError): self.stack = ex.stack else: self.stack = traceback.extract_tb(sys.exc_info()[2]) self.stack += traceback.extract_stack()[:-1] self.verbose_msg = ''.join(traceback.format_list(self.stack)) def __str__(self): return str(self.msg) class BuildError(WafError): """Error raised during the build and install phases""" def __init__(self, error_tasks=[]): """ :param error_tasks: tasks that could not complete normally :type error_tasks: list of task objects """ self.tasks = error_tasks WafError.__init__(self, self.format_error()) def format_error(self): """Formats the error messages from the tasks that failed""" lst = ['Build failed'] for tsk in self.tasks: txt = tsk.format_error() if txt: lst.append(txt) return '\n'.join(lst) class ConfigurationError(WafError): """Configuration exception raised in particular by :py:meth:`waflib.Context.Context.fatal`""" pass class TaskRescan(WafError): """Task-specific exception type signalling required signature recalculations""" pass class 
TaskNotReady(WafError): """Task-specific exception type signalling that task signatures cannot be computed""" pass tdb-1.4.2/third_party/waf/waflib/Logs.py0000660000000000000000000002303313527011455020057 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ logging, colors, terminal width and pretty-print """ import os, re, traceback, sys from waflib import Utils, ansiterm if not os.environ.get('NOSYNC', False): # synchronized output is nearly mandatory to prevent garbled output if sys.stdout.isatty() and id(sys.stdout) == id(sys.__stdout__): sys.stdout = ansiterm.AnsiTerm(sys.stdout) if sys.stderr.isatty() and id(sys.stderr) == id(sys.__stderr__): sys.stderr = ansiterm.AnsiTerm(sys.stderr) # import the logging module after since it holds a reference on sys.stderr # in case someone uses the root logger import logging LOG_FORMAT = os.environ.get('WAF_LOG_FORMAT', '%(asctime)s %(c1)s%(zone)s%(c2)s %(message)s') HOUR_FORMAT = os.environ.get('WAF_HOUR_FORMAT', '%H:%M:%S') zones = [] """ See :py:class:`waflib.Logs.log_filter` """ verbose = 0 """ Global verbosity level, see :py:func:`waflib.Logs.debug` and :py:func:`waflib.Logs.error` """ colors_lst = { 'USE' : True, 'BOLD' :'\x1b[01;1m', 'RED' :'\x1b[01;31m', 'GREEN' :'\x1b[32m', 'YELLOW':'\x1b[33m', 'PINK' :'\x1b[35m', 'BLUE' :'\x1b[01;34m', 'CYAN' :'\x1b[36m', 'GREY' :'\x1b[37m', 'NORMAL':'\x1b[0m', 'cursor_on' :'\x1b[?25h', 'cursor_off' :'\x1b[?25l', } indicator = '\r\x1b[K%s%s%s' try: unicode except NameError: unicode = None def enable_colors(use): """ If *1* is given, then the system will perform a few verifications before enabling colors, such as checking whether the interpreter is running in a terminal. A value of zero will disable colors, and a value above *1* will force colors. :param use: whether to enable colors or not :type use: integer """ if use == 1: if not (sys.stderr.isatty() or sys.stdout.isatty()): use = 0 if Utils.is_win32 and os.name != 'java': term = os.environ.get('TERM', '') # has ansiterm else: term = os.environ.get('TERM', 'dumb') if term in ('dumb', 'emacs'): use = 0 if use >= 1: os.environ['TERM'] = 'vt100' colors_lst['USE'] = use # If console packages are available, replace the dummy function with a real # implementation try: get_term_cols = ansiterm.get_term_cols except AttributeError: def get_term_cols(): return 80 get_term_cols.__doc__ = """ Returns the console width in characters. :return: the number of characters per line :rtype: int """ def get_color(cl): """ Returns the ansi sequence corresponding to the given color name. An empty string is returned when coloring is globally disabled. :param cl: color name in capital letters :type cl: string """ if colors_lst['USE']: return colors_lst.get(cl, '') return '' class color_dict(object): """attribute-based color access, eg: colors.PINK""" def __getattr__(self, a): return get_color(a) def __call__(self, a): return get_color(a) colors = color_dict() re_log = re.compile(r'(\w+): (.*)', re.M) class log_filter(logging.Filter): """ Waf logs are of the form 'name: message', and can be filtered by 'waf --zones=name'. 
For example, the following:: from waflib import Logs Logs.debug('test: here is a message') Will be displayed only when executing:: $ waf --zones=test """ def __init__(self, name=''): logging.Filter.__init__(self, name) def filter(self, rec): """ Filters log records by zone and by logging level :param rec: log entry """ rec.zone = rec.module if rec.levelno >= logging.INFO: return True m = re_log.match(rec.msg) if m: rec.zone = m.group(1) rec.msg = m.group(2) if zones: return getattr(rec, 'zone', '') in zones or '*' in zones elif not verbose > 2: return False return True class log_handler(logging.StreamHandler): """Dispatches messages to stderr/stdout depending on the severity level""" def emit(self, record): """ Delegates the functionality to :py:meth:`waflib.Log.log_handler.emit_override` """ # default implementation try: try: self.stream = record.stream except AttributeError: if record.levelno >= logging.WARNING: record.stream = self.stream = sys.stderr else: record.stream = self.stream = sys.stdout self.emit_override(record) self.flush() except (KeyboardInterrupt, SystemExit): raise except: # from the python library -_- self.handleError(record) def emit_override(self, record, **kw): """ Writes the log record to the desired stream (stderr/stdout) """ self.terminator = getattr(record, 'terminator', '\n') stream = self.stream if unicode: # python2 msg = self.formatter.format(record) fs = '%s' + self.terminator try: if (isinstance(msg, unicode) and getattr(stream, 'encoding', None)): fs = fs.decode(stream.encoding) try: stream.write(fs % msg) except UnicodeEncodeError: stream.write((fs % msg).encode(stream.encoding)) else: stream.write(fs % msg) except UnicodeError: stream.write((fs % msg).encode('utf-8')) else: logging.StreamHandler.emit(self, record) class formatter(logging.Formatter): """Simple log formatter which handles colors""" def __init__(self): logging.Formatter.__init__(self, LOG_FORMAT, HOUR_FORMAT) def format(self, rec): """ Formats records and adds colors as needed. The records do not get a leading hour format if the logging level is above *INFO*. 
""" try: msg = rec.msg.decode('utf-8') except Exception: msg = rec.msg use = colors_lst['USE'] if (use == 1 and rec.stream.isatty()) or use == 2: c1 = getattr(rec, 'c1', None) if c1 is None: c1 = '' if rec.levelno >= logging.ERROR: c1 = colors.RED elif rec.levelno >= logging.WARNING: c1 = colors.YELLOW elif rec.levelno >= logging.INFO: c1 = colors.GREEN c2 = getattr(rec, 'c2', colors.NORMAL) msg = '%s%s%s' % (c1, msg, c2) else: # remove single \r that make long lines in text files # and other terminal commands msg = re.sub(r'\r(?!\n)|\x1B\[(K|.*?(m|h|l))', '', msg) if rec.levelno >= logging.INFO: # the goal of this is to format without the leading "Logs, hour" prefix if rec.args: try: return msg % rec.args except UnicodeDecodeError: return msg.encode('utf-8') % rec.args return msg rec.msg = msg rec.c1 = colors.PINK rec.c2 = colors.NORMAL return logging.Formatter.format(self, rec) log = None """global logger for Logs.debug, Logs.error, etc""" def debug(*k, **kw): """ Wraps logging.debug and discards messages if the verbosity level :py:attr:`waflib.Logs.verbose` ≤ 0 """ if verbose: k = list(k) k[0] = k[0].replace('\n', ' ') log.debug(*k, **kw) def error(*k, **kw): """ Wrap logging.errors, adds the stack trace when the verbosity level :py:attr:`waflib.Logs.verbose` ≥ 2 """ log.error(*k, **kw) if verbose > 2: st = traceback.extract_stack() if st: st = st[:-1] buf = [] for filename, lineno, name, line in st: buf.append(' File %r, line %d, in %s' % (filename, lineno, name)) if line: buf.append(' %s' % line.strip()) if buf: log.error('\n'.join(buf)) def warn(*k, **kw): """ Wraps logging.warning """ log.warning(*k, **kw) def info(*k, **kw): """ Wraps logging.info """ log.info(*k, **kw) def init_log(): """ Initializes the logger :py:attr:`waflib.Logs.log` """ global log log = logging.getLogger('waflib') log.handlers = [] log.filters = [] hdlr = log_handler() hdlr.setFormatter(formatter()) log.addHandler(hdlr) log.addFilter(log_filter()) log.setLevel(logging.DEBUG) def make_logger(path, name): """ Creates a simple logger, which is often used to redirect the context command output:: from waflib import Logs bld.logger = Logs.make_logger('test.log', 'build') bld.check(header_name='sadlib.h', features='cxx cprogram', mandatory=False) # have the file closed immediately Logs.free_logger(bld.logger) # stop logging bld.logger = None The method finalize() of the command will try to free the logger, if any :param path: file name to write the log output to :type path: string :param name: logger name (loggers are reused) :type name: string """ logger = logging.getLogger(name) if sys.hexversion > 0x3000000: encoding = sys.stdout.encoding else: encoding = None hdlr = logging.FileHandler(path, 'w', encoding=encoding) formatter = logging.Formatter('%(message)s') hdlr.setFormatter(formatter) logger.addHandler(hdlr) logger.setLevel(logging.DEBUG) return logger def make_mem_logger(name, to_log, size=8192): """ Creates a memory logger to avoid writing concurrently to the main logger """ from logging.handlers import MemoryHandler logger = logging.getLogger(name) hdlr = MemoryHandler(size, target=to_log) formatter = logging.Formatter('%(message)s') hdlr.setFormatter(formatter) logger.addHandler(hdlr) logger.memhandler = hdlr logger.setLevel(logging.DEBUG) return logger def free_logger(logger): """ Frees the resources held by the loggers created through make_logger or make_mem_logger. This is used for file cleanup and for handler removal (logger objects are re-used). 
""" try: for x in logger.handlers: x.close() logger.removeHandler(x) except Exception: pass def pprint(col, msg, label='', sep='\n'): """ Prints messages in color immediately on stderr:: from waflib import Logs Logs.pprint('RED', 'Something bad just happened') :param col: color name to use in :py:const:`Logs.colors_lst` :type col: string :param msg: message to display :type msg: string or a value that can be printed by %s :param label: a message to add after the colored output :type label: string :param sep: a string to append at the end (line separator) :type sep: string """ info('%s%s%s %s', colors(col), msg, colors.NORMAL, label, extra={'terminator':sep}) tdb-1.4.2/third_party/waf/waflib/Node.py0000660000000000000000000006162513527011455020051 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ Node: filesystem structure #. Each file/folder is represented by exactly one node. #. Some potential class properties are stored on :py:class:`waflib.Build.BuildContext` : nodes to depend on, etc. Unused class members can increase the `.wafpickle` file size sensibly. #. Node objects should never be created directly, use the methods :py:func:`Node.make_node` or :py:func:`Node.find_node` for the low-level operations #. The methods :py:func:`Node.find_resource`, :py:func:`Node.find_dir` :py:func:`Node.find_or_declare` must be used when a build context is present #. Each instance of :py:class:`waflib.Context.Context` has a unique :py:class:`Node` subclass required for serialization. (:py:class:`waflib.Node.Nod3`, see the :py:class:`waflib.Context.Context` initializer). A reference to the context owning a node is held as *self.ctx* """ import os, re, sys, shutil from waflib import Utils, Errors exclude_regs = ''' **/*~ **/#*# **/.#* **/%*% **/._* **/*.swp **/CVS **/CVS/** **/.cvsignore **/SCCS **/SCCS/** **/vssver.scc **/.svn **/.svn/** **/BitKeeper **/.git **/.git/** **/.gitignore **/.bzr **/.bzrignore **/.bzr/** **/.hg **/.hg/** **/_MTN **/_MTN/** **/.arch-ids **/{arch} **/_darcs **/_darcs/** **/.intlcache **/.DS_Store''' """ Ant patterns for files and folders to exclude while doing the recursive traversal in :py:meth:`waflib.Node.Node.ant_glob` """ def ant_matcher(s, ignorecase): reflags = re.I if ignorecase else 0 ret = [] for x in Utils.to_list(s): x = x.replace('\\', '/').replace('//', '/') if x.endswith('/'): x += '**' accu = [] for k in x.split('/'): if k == '**': accu.append(k) else: k = k.replace('.', '[.]').replace('*', '.*').replace('?', '.').replace('+', '\\+') k = '^%s$' % k try: exp = re.compile(k, flags=reflags) except Exception as e: raise Errors.WafError('Invalid pattern: %s' % k, e) else: accu.append(exp) ret.append(accu) return ret def ant_sub_filter(name, nn): ret = [] for lst in nn: if not lst: pass elif lst[0] == '**': ret.append(lst) if len(lst) > 1: if lst[1].match(name): ret.append(lst[2:]) else: ret.append([]) elif lst[0].match(name): ret.append(lst[1:]) return ret def ant_sub_matcher(name, pats): nacc = ant_sub_filter(name, pats[0]) nrej = ant_sub_filter(name, pats[1]) if [] in nrej: nacc = [] return [nacc, nrej] class Node(object): """ This class is organized in two parts: * The basic methods meant for filesystem access (compute paths, create folders, etc) * The methods bound to a :py:class:`waflib.Build.BuildContext` (require ``bld.srcnode`` and ``bld.bldnode``) """ dict_class = dict """ Subclasses can provide a dict class to enable case insensitivity for example. 
""" __slots__ = ('name', 'parent', 'children', 'cache_abspath', 'cache_isdir') def __init__(self, name, parent): """ .. note:: Use :py:func:`Node.make_node` or :py:func:`Node.find_node` instead of calling this constructor """ self.name = name self.parent = parent if parent: if name in parent.children: raise Errors.WafError('node %s exists in the parent files %r already' % (name, parent)) parent.children[name] = self def __setstate__(self, data): "Deserializes node information, used for persistence" self.name = data[0] self.parent = data[1] if data[2] is not None: # Issue 1480 self.children = self.dict_class(data[2]) def __getstate__(self): "Serializes node information, used for persistence" return (self.name, self.parent, getattr(self, 'children', None)) def __str__(self): """ String representation (abspath), for debugging purposes :rtype: string """ return self.abspath() def __repr__(self): """ String representation (abspath), for debugging purposes :rtype: string """ return self.abspath() def __copy__(self): """ Provided to prevent nodes from being copied :raises: :py:class:`waflib.Errors.WafError` """ raise Errors.WafError('nodes are not supposed to be copied') def read(self, flags='r', encoding='latin-1'): """ Reads and returns the contents of the file represented by this node, see :py:func:`waflib.Utils.readf`:: def build(bld): bld.path.find_node('wscript').read() :param flags: Open mode :type flags: string :param encoding: encoding value for Python3 :type encoding: string :rtype: string or bytes :return: File contents """ return Utils.readf(self.abspath(), flags, encoding) def write(self, data, flags='w', encoding='latin-1'): """ Writes data to the file represented by this node, see :py:func:`waflib.Utils.writef`:: def build(bld): bld.path.make_node('foo.txt').write('Hello, world!') :param data: data to write :type data: string :param flags: Write mode :type flags: string :param encoding: encoding value for Python3 :type encoding: string """ Utils.writef(self.abspath(), data, flags, encoding) def read_json(self, convert=True, encoding='utf-8'): """ Reads and parses the contents of this node as JSON (Python ≥ 2.6):: def build(bld): bld.path.find_node('abc.json').read_json() Note that this by default automatically decodes unicode strings on Python2, unlike what the Python JSON module does. :type convert: boolean :param convert: Prevents decoding of unicode strings on Python2 :type encoding: string :param encoding: The encoding of the file to read. 
This defaults to UTF-8 as per the JSON standard :rtype: object :return: Parsed file contents """ import json # Python 2.6 and up object_pairs_hook = None if convert and sys.hexversion < 0x3000000: try: _type = unicode except NameError: _type = str def convert(value): if isinstance(value, list): return [convert(element) for element in value] elif isinstance(value, _type): return str(value) else: return value def object_pairs(pairs): return dict((str(pair[0]), convert(pair[1])) for pair in pairs) object_pairs_hook = object_pairs return json.loads(self.read(encoding=encoding), object_pairs_hook=object_pairs_hook) def write_json(self, data, pretty=True): """ Writes a python object as JSON to disk (Python ≥ 2.6) as UTF-8 data (JSON standard):: def build(bld): bld.path.find_node('xyz.json').write_json(199) :type data: object :param data: The data to write to disk :type pretty: boolean :param pretty: Determines if the JSON will be nicely space separated """ import json # Python 2.6 and up indent = 2 separators = (',', ': ') sort_keys = pretty newline = os.linesep if not pretty: indent = None separators = (',', ':') newline = '' output = json.dumps(data, indent=indent, separators=separators, sort_keys=sort_keys) + newline self.write(output, encoding='utf-8') def exists(self): """ Returns whether the Node is present on the filesystem :rtype: bool """ return os.path.exists(self.abspath()) def isdir(self): """ Returns whether the Node represents a folder :rtype: bool """ return os.path.isdir(self.abspath()) def chmod(self, val): """ Changes the file/dir permissions:: def build(bld): bld.path.chmod(493) # 0755 """ os.chmod(self.abspath(), val) def delete(self, evict=True): """ Removes the file/folder from the filesystem (equivalent to `rm -rf`), and removes this object from the Node tree. Do not use this object after calling this method. """ try: try: if os.path.isdir(self.abspath()): shutil.rmtree(self.abspath()) else: os.remove(self.abspath()) except OSError: if os.path.exists(self.abspath()): raise finally: if evict: self.evict() def evict(self): """ Removes this node from the Node tree """ del self.parent.children[self.name] def suffix(self): """ Returns the file rightmost extension, for example `a.b.c.d → .d` :rtype: string """ k = max(0, self.name.rfind('.')) return self.name[k:] def height(self): """ Returns the depth in the folder hierarchy from the filesystem root or from all the file drives :returns: filesystem depth :rtype: integer """ d = self val = -1 while d: d = d.parent val += 1 return val def listdir(self): """ Lists the folder contents :returns: list of file/folder names ordered alphabetically :rtype: list of string """ lst = Utils.listdir(self.abspath()) lst.sort() return lst def mkdir(self): """ Creates a folder represented by this node. Intermediate folders are created as needed.
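For example (the folder name is illustrative)::

	def build(bld):
		bld.path.make_node('generated/headers').mkdir()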
:raises: :py:class:`waflib.Errors.WafError` when the folder is missing
		"""
		if self.isdir():
			return

		try:
			self.parent.mkdir()
		except OSError:
			pass

		if self.name:
			try:
				os.makedirs(self.abspath())
			except OSError:
				pass

			if not self.isdir():
				raise Errors.WafError('Could not create the directory %r' % self)

			try:
				self.children
			except AttributeError:
				self.children = self.dict_class()

	def find_node(self, lst):
		"""
		Finds a node on the file system (files or folders), and creates the corresponding Node object if it exists

		:param lst: relative path
		:type lst: string or list of string
		:returns: The corresponding Node object or None if no entry was found on the filesystem
		:rtype: :py:class:`waflib.Node.Node`
		"""
		if isinstance(lst, str):
			lst = [x for x in Utils.split_path(lst) if x and x != '.']

		if lst and lst[0].startswith('\\\\') and not self.parent:
			node = self.ctx.root.make_node(lst[0])
			node.cache_isdir = True
			return node.find_node(lst[1:])

		cur = self
		for x in lst:
			if x == '..':
				cur = cur.parent or cur
				continue

			try:
				ch = cur.children
			except AttributeError:
				cur.children = self.dict_class()
			else:
				try:
					cur = ch[x]
					continue
				except KeyError:
					pass

			# optimistic: create the node first then look if it was correct to do so
			cur = self.__class__(x, cur)
			if not cur.exists():
				cur.evict()
				return None

		if not cur.exists():
			cur.evict()
			return None

		return cur

	def make_node(self, lst):
		"""
		Returns or creates a Node object corresponding to the input path without considering the filesystem.

		:param lst: relative path
		:type lst: string or list of string
		:rtype: :py:class:`waflib.Node.Node`
		"""
		if isinstance(lst, str):
			lst = [x for x in Utils.split_path(lst) if x and x != '.']

		cur = self
		for x in lst:
			if x == '..':
				cur = cur.parent or cur
				continue

			try:
				cur = cur.children[x]
			except AttributeError:
				cur.children = self.dict_class()
			except KeyError:
				pass
			else:
				continue
			cur = self.__class__(x, cur)
		return cur

	def search_node(self, lst):
		"""
		Returns a Node previously defined in the data structure. The filesystem is not considered.

		:param lst: relative path
		:type lst: string or list of string
		:rtype: :py:class:`waflib.Node.Node` or None if there is no entry in the Node datastructure
		"""
		if isinstance(lst, str):
			lst = [x for x in Utils.split_path(lst) if x and x != '.']

		cur = self
		for x in lst:
			if x == '..':
				cur = cur.parent or cur
			else:
				try:
					cur = cur.children[x]
				except (AttributeError, KeyError):
					return None
		return cur

	def path_from(self, node):
		"""
		Path of this node seen from the other::

			def build(bld):
				n1 = bld.path.find_node('foo/bar/xyz.txt')
				n2 = bld.path.find_node('foo/stuff/')
				n1.path_from(n2) # '../bar/xyz.txt'

		:param node: path to use as a reference
		:type node: :py:class:`waflib.Node.Node`
		:returns: a relative path or an absolute one if that is better
		:rtype: string
		"""
		c1 = self
		c2 = node

		c1h = c1.height()
		c2h = c2.height()

		lst = []
		up = 0

		while c1h > c2h:
			lst.append(c1.name)
			c1 = c1.parent
			c1h -= 1

		while c2h > c1h:
			up += 1
			c2 = c2.parent
			c2h -= 1

		while not c1 is c2:
			lst.append(c1.name)
			up += 1

			c1 = c1.parent
			c2 = c2.parent

		if c1.parent:
			lst.extend(['..'] * up)
			lst.reverse()
			return os.sep.join(lst) or '.'
		else:
			return self.abspath()

	def abspath(self):
		"""
		Returns the absolute path.
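# Illustration (not part of waflib): the lookup methods above differ in how they
# treat the filesystem. A sketch; 'src/main.c' is a hypothetical path.
def _example_lookups(bld):
	n1 = bld.path.make_node('src/main.c')    # created unconditionally, filesystem ignored
	n2 = bld.path.find_node('src/main.c')    # None unless the file really exists on disk
	n3 = bld.path.search_node('src/main.c')  # cache only: finds the node created above
	assert n3 is n1
	if n2 is not None:
		print(n2.path_from(bld.path))        # relative path, e.g. 'src/main.c'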
A cache is kept on the node as ``cache_abspath``

		:rtype: string
		"""
		try:
			return self.cache_abspath
		except AttributeError:
			pass
		# think twice before touching this (performance + complexity + correctness)

		if not self.parent:
			val = os.sep
		elif not self.parent.name:
			val = os.sep + self.name
		else:
			val = self.parent.abspath() + os.sep + self.name
		self.cache_abspath = val
		return val

	if Utils.is_win32:
		def abspath(self):
			try:
				return self.cache_abspath
			except AttributeError:
				pass
			if not self.parent:
				val = ''
			elif not self.parent.name:
				val = self.name + os.sep
			else:
				val = self.parent.abspath().rstrip(os.sep) + os.sep + self.name
			self.cache_abspath = val
			return val

	def is_child_of(self, node):
		"""
		Returns whether the object belongs to a subtree of the input node::

			def build(bld):
				node = bld.path.find_node('wscript')
				node.is_child_of(bld.path) # True

		:param node: path to use as a reference
		:type node: :py:class:`waflib.Node.Node`
		:rtype: bool
		"""
		p = self
		diff = self.height() - node.height()
		while diff > 0:
			diff -= 1
			p = p.parent
		return p is node

	def ant_iter(self, accept=None, maxdepth=25, pats=[], dir=False, src=True, remove=True, quiet=False):
		"""
		Recursive method used by :py:meth:`waflib.Node.Node.ant_glob`.

		:param accept: function used for accepting/rejecting a node, returns the patterns that can be still accepted in recursion
		:type accept: function
		:param maxdepth: maximum depth in the filesystem (25)
		:type maxdepth: int
		:param pats: list of patterns to accept and list of patterns to exclude
		:type pats: tuple
		:param dir: return folders too (False by default)
		:type dir: bool
		:param src: return files (True by default)
		:type src: bool
		:param remove: remove files/folders that do not exist (True by default)
		:type remove: bool
		:param quiet: disable build directory traversal warnings (verbose mode)
		:type quiet: bool
		:returns: A generator object to iterate from
		:rtype: iterator
		"""
		dircont = self.listdir()

		try:
			lst = set(self.children.keys())
		except AttributeError:
			self.children = self.dict_class()
		else:
			if remove:
				for x in lst - set(dircont):
					self.children[x].evict()

		for name in dircont:
			npats = accept(name, pats)
			if npats and npats[0]:
				accepted = [] in npats[0]

				node = self.make_node([name])

				isdir = node.isdir()
				if accepted:
					if isdir:
						if dir:
							yield node
					elif src:
						yield node

				if isdir:
					node.cache_isdir = True
					if maxdepth:
						for k in node.ant_iter(accept=accept, maxdepth=maxdepth - 1, pats=npats, dir=dir, src=src, remove=remove, quiet=quiet):
							yield k

	def ant_glob(self, *k, **kw):
		"""
		Finds files across folders and returns Node objects:

		* ``**/*`` find all files recursively
		* ``**/*.class`` find all files ending by .class
		* ``..`` find files having two dot characters

		For example::

			def configure(cfg):
				# find all .cpp files
				cfg.path.ant_glob('**/*.cpp')
				# find particular files from the root filesystem (can be slow)
				cfg.root.ant_glob('etc/*.txt')
				# simple exclusion rule example
				cfg.path.ant_glob('*.c*', excl=['*.c'], src=True, dir=False)

		For more information about the patterns, consult http://ant.apache.org/manual/dirtasks.html
		Please remember that the '..' sequence does not represent the parent directory::

			def configure(cfg):
				cfg.path.ant_glob('../*.h') # incorrect
				cfg.path.parent.ant_glob('*.h') # correct

		The Node structure is itself a filesystem cache, so certain precautions must
		be taken while matching files in the build or installation phases.
		Node objects that do not have a corresponding file or folder are garbage-collected by default.
This garbage collection is usually required to prevent returning files that do not exist
		anymore. Yet, this may also remove Node objects of files that are yet-to-be built.

		This typically happens when trying to match files in the build directory,
		but there are also cases when files are created in the source directory.
		Run ``waf -v`` to display any warnings, and consider passing ``remove=False``
		when matching files in the build directory.

		Since ant_glob can traverse both source and build folders, it is a best practice
		to call this method only from the most specific build node::

			def build(bld):
				# traverses the build directory, may need ``remove=False``:
				bld.path.ant_glob('project/dir/**/*.h')
				# better, no accidental build directory traversal:
				bld.path.find_node('project/dir').ant_glob('**/*.h') # best

		In addition, files and folders are listed immediately. When matching files in the
		build folders, consider passing ``generator=True`` so that the generator object
		returned can defer computation to a later stage. For example::

			def build(bld):
				bld(rule='tar xvf ${SRC}', source='arch.tar')
				bld.add_group()
				gen = bld.bldnode.ant_glob("*.h", generator=True, remove=True)
				# files will be listed only after the arch.tar is unpacked
				bld(rule='ls ${SRC}', source=gen, name='XYZ')

		:param incl: ant patterns or list of patterns to include
		:type incl: string or list of strings
		:param excl: ant patterns or list of patterns to exclude
		:type excl: string or list of strings
		:param dir: return folders too (False by default)
		:type dir: bool
		:param src: return files (True by default)
		:type src: bool
		:param maxdepth: maximum depth of recursion
		:type maxdepth: int
		:param ignorecase: ignore case while matching (False by default)
		:type ignorecase: bool
		:param generator: Whether to evaluate the Nodes lazily
		:type generator: bool
		:param remove: remove files/folders that do not exist (True by default)
		:type remove: bool
		:param quiet: disable build directory traversal warnings (verbose mode)
		:type quiet: bool
		:returns: The corresponding Node objects as a list or as a generator object (generator=True)
		:rtype: by default, list of :py:class:`waflib.Node.Node` instances
		"""
		src = kw.get('src', True)
		dir = kw.get('dir')
		excl = kw.get('excl', exclude_regs)
		incl = k and k[0] or kw.get('incl', '**')
		remove = kw.get('remove', True)
		maxdepth = kw.get('maxdepth', 25)
		ignorecase = kw.get('ignorecase', False)
		quiet = kw.get('quiet', False)
		pats = (ant_matcher(incl, ignorecase), ant_matcher(excl, ignorecase))

		if kw.get('generator'):
			return Utils.lazy_generator(self.ant_iter, (ant_sub_matcher, maxdepth, pats, dir, src, remove, quiet))

		it = self.ant_iter(ant_sub_matcher, maxdepth, pats, dir, src, remove, quiet)
		if kw.get('flat'):
			# returns relative paths as a space-delimited string
			# prefer Node objects whenever possible
			return ' '.join(x.path_from(self) for x in it)
		return list(it)

	# ----------------------------------------------------------------------------
	# the methods below require the source/build folders (bld.srcnode/bld.bldnode)

	def is_src(self):
		"""
		Returns True if the node is below the source directory. Note that ``!is_src() ≠ is_bld()``

		:rtype: bool
		"""
		cur = self
		x = self.ctx.srcnode
		y = self.ctx.bldnode
		while cur.parent:
			if cur is y:
				return False
			if cur is x:
				return True
			cur = cur.parent
		return False

	def is_bld(self):
		"""
		Returns True if the node is below the build directory.
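# Illustration (not part of waflib): typical ant_glob calls following the advice
# above; the folder names and patterns are hypothetical.
def _example_glob(bld):
	inc = bld.path.find_node('include')       # start from the most specific node...
	if inc:
		headers = inc.ant_glob('**/*.h')      # ...to avoid accidental build dir traversal
	sources = bld.path.ant_glob('src/**/*.c', excl=['src/legacy/**'])
	objs = bld.bldnode.ant_glob('**/*.o', generator=True, remove=False)  # deferred, no eviction
	return sources, objs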
Note that ``!is_bld() ≠ is_src()`` :rtype: bool """ cur = self y = self.ctx.bldnode while cur.parent: if cur is y: return True cur = cur.parent return False def get_src(self): """ Returns the corresponding Node object in the source directory (or self if already under the source directory). Use this method only if the purpose is to create a Node object (this is common with folders but not with files, see ticket 1937) :rtype: :py:class:`waflib.Node.Node` """ cur = self x = self.ctx.srcnode y = self.ctx.bldnode lst = [] while cur.parent: if cur is y: lst.reverse() return x.make_node(lst) if cur is x: return self lst.append(cur.name) cur = cur.parent return self def get_bld(self): """ Return the corresponding Node object in the build directory (or self if already under the build directory). Use this method only if the purpose is to create a Node object (this is common with folders but not with files, see ticket 1937) :rtype: :py:class:`waflib.Node.Node` """ cur = self x = self.ctx.srcnode y = self.ctx.bldnode lst = [] while cur.parent: if cur is y: return self if cur is x: lst.reverse() return self.ctx.bldnode.make_node(lst) lst.append(cur.name) cur = cur.parent # the file is external to the current project, make a fake root in the current build directory lst.reverse() if lst and Utils.is_win32 and len(lst[0]) == 2 and lst[0].endswith(':'): lst[0] = lst[0][0] return self.ctx.bldnode.make_node(['__root__'] + lst) def find_resource(self, lst): """ Use this method in the build phase to find source files corresponding to the relative path given. First it looks up the Node data structure to find any declared Node object in the build directory. If None is found, it then considers the filesystem in the source directory. :param lst: relative path :type lst: string or list of string :returns: the corresponding Node object or None :rtype: :py:class:`waflib.Node.Node` """ if isinstance(lst, str): lst = [x for x in Utils.split_path(lst) if x and x != '.'] node = self.get_bld().search_node(lst) if not node: node = self.get_src().find_node(lst) if node and node.isdir(): return None return node def find_or_declare(self, lst): """ Use this method in the build phase to declare output files which are meant to be written in the build directory. This method creates the Node object and its parent folder as needed. 
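# Illustration (not part of waflib): the common build-phase pairing of the two
# methods above, with hypothetical file names ('parser.y', 'parser.c').
def _example_resolve(bld):
	src = bld.path.find_resource('parser.y')    # build dir cache first, then the source dir on disk
	out = bld.path.find_or_declare('parser.c')  # declared under the build dir, parent folder created
	if src is not None:
		print(src.srcpath(), '->', out.bldpath())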
:param lst: relative path
		:type lst: string or list of string
		"""
		if isinstance(lst, str) and os.path.isabs(lst):
			node = self.ctx.root.make_node(lst)
		else:
			node = self.get_bld().make_node(lst)
		node.parent.mkdir()
		return node

	def find_dir(self, lst):
		"""
		Searches for a folder on the filesystem (see :py:meth:`waflib.Node.Node.find_node`)

		:param lst: relative path
		:type lst: string or list of string
		:returns: The corresponding Node object or None if there is no such folder
		:rtype: :py:class:`waflib.Node.Node`
		"""
		if isinstance(lst, str):
			lst = [x for x in Utils.split_path(lst) if x and x != '.']

		node = self.find_node(lst)
		if node and not node.isdir():
			return None
		return node

	# helpers for building things

	def change_ext(self, ext, ext_in=None):
		"""
		Declares a build node with a distinct extension; this uses :py:meth:`waflib.Node.Node.find_or_declare`

		:return: A build node of the same path, but with a different extension
		:rtype: :py:class:`waflib.Node.Node`
		"""
		name = self.name
		if ext_in is None:
			k = name.rfind('.')
			if k >= 0:
				name = name[:k] + ext
			else:
				name = name + ext
		else:
			name = name[:- len(ext_in)] + ext

		return self.parent.find_or_declare([name])

	def bldpath(self):
		"""
		Returns the relative path seen from the build directory ``src/foo.cpp``

		:rtype: string
		"""
		return self.path_from(self.ctx.bldnode)

	def srcpath(self):
		"""
		Returns the relative path seen from the source directory ``../src/foo.cpp``

		:rtype: string
		"""
		return self.path_from(self.ctx.srcnode)

	def relpath(self):
		"""
		If the file is in the build directory, returns :py:meth:`waflib.Node.Node.bldpath`,
		else returns :py:meth:`waflib.Node.Node.srcpath`

		:rtype: string
		"""
		cur = self
		x = self.ctx.bldnode
		while cur.parent:
			if cur is x:
				return self.bldpath()
			cur = cur.parent
		return self.srcpath()

	def bld_dir(self):
		"""
		Equivalent to self.parent.bldpath()

		:rtype: string
		"""
		return self.parent.bldpath()

	def h_file(self):
		"""
		See :py:func:`waflib.Utils.h_file`

		:return: a hash representing the file contents
		:rtype: string or bytes
		"""
		return Utils.h_file(self.abspath())

	def get_bld_sig(self):
		"""
		Returns a signature (see :py:meth:`waflib.Node.Node.h_file`) for the purpose
		of build dependency calculation. This method uses a per-context cache.

		:return: a hash representing the object contents
		:rtype: string or bytes
		"""
		# previous behaviour can be set by returning self.ctx.node_sigs[self] when a build node
		try:
			cache = self.ctx.cache_sig
		except AttributeError:
			cache = self.ctx.cache_sig = {}
		try:
			ret = cache[self]
		except KeyError:
			p = self.abspath()
			try:
				ret = cache[self] = self.h_file()
			except EnvironmentError:
				if self.isdir():
					# allow folders as build nodes, do not use the creation time
					st = os.stat(p)
					ret = cache[self] = Utils.h_list([p, st.st_ino, st.st_mode])
					return ret
				raise
		return ret

pickle_lock = Utils.threading.Lock()
"""Lock mandatory for thread-safe node serialization"""

class Nod3(Node):
	"""Mandatory subclass for thread-safe node serialization"""
	pass # do not remove

tdb-1.4.2/third_party/waf/waflib/Options.py0000660000000000000000000002611713444661622020611 0ustar rootroot00000000000000#!/usr/bin/env python
# encoding: utf-8
# Scott Newton, 2005 (scottn)
# Thomas Nagy, 2006-2018 (ita)

"""
Support for waf command-line options

Provides default and command-line options, as well as the command
that reads the ``options`` wscript function.
""" import os, tempfile, optparse, sys, re from waflib import Logs, Utils, Context, Errors options = optparse.Values() """ A global dictionary representing user-provided command-line options:: $ waf --foo=bar """ commands = [] """ List of commands to execute extracted from the command-line. This list is consumed during the execution by :py:func:`waflib.Scripting.run_commands`. """ envvars = [] """ List of environment variable declarations placed after the Waf executable name. These are detected by searching for "=" in the remaining arguments. You probably do not want to use this. """ lockfile = os.environ.get('WAFLOCK', '.lock-waf_%s_build' % sys.platform) """ Name of the lock file that marks a project as configured """ class opt_parser(optparse.OptionParser): """ Command-line options parser. """ def __init__(self, ctx, allow_unknown=False): optparse.OptionParser.__init__(self, conflict_handler='resolve', add_help_option=False, version='waf %s (%s)' % (Context.WAFVERSION, Context.WAFREVISION)) self.formatter.width = Logs.get_term_cols() self.ctx = ctx self.allow_unknown = allow_unknown def _process_args(self, largs, rargs, values): """ Custom _process_args to allow unknown options according to the allow_unknown status """ while rargs: try: optparse.OptionParser._process_args(self,largs,rargs,values) except (optparse.BadOptionError, optparse.AmbiguousOptionError) as e: if self.allow_unknown: largs.append(e.opt_str) else: self.error(str(e)) def print_usage(self, file=None): return self.print_help(file) def get_usage(self): """ Builds the message to print on ``waf --help`` :rtype: string """ cmds_str = {} for cls in Context.classes: if not cls.cmd or cls.cmd == 'options' or cls.cmd.startswith( '_' ): continue s = cls.__doc__ or '' cmds_str[cls.cmd] = s if Context.g_module: for (k, v) in Context.g_module.__dict__.items(): if k in ('options', 'init', 'shutdown'): continue if type(v) is type(Context.create_context): if v.__doc__ and not k.startswith('_'): cmds_str[k] = v.__doc__ just = 0 for k in cmds_str: just = max(just, len(k)) lst = [' %s: %s' % (k.ljust(just), v) for (k, v) in cmds_str.items()] lst.sort() ret = '\n'.join(lst) return '''waf [commands] [options] Main commands (example: ./waf build -j4) %s ''' % ret class OptionsContext(Context.Context): """ Collects custom options from wscript files and parses the command line. Sets the global :py:const:`waflib.Options.commands` and :py:const:`waflib.Options.options` values. 
""" cmd = 'options' fun = 'options' def __init__(self, **kw): super(OptionsContext, self).__init__(**kw) self.parser = opt_parser(self) """Instance of :py:class:`waflib.Options.opt_parser`""" self.option_groups = {} jobs = self.jobs() p = self.add_option color = os.environ.get('NOCOLOR', '') and 'no' or 'auto' if os.environ.get('CLICOLOR', '') == '0': color = 'no' elif os.environ.get('CLICOLOR_FORCE', '') == '1': color = 'yes' p('-c', '--color', dest='colors', default=color, action='store', help='whether to use colors (yes/no/auto) [default: auto]', choices=('yes', 'no', 'auto')) p('-j', '--jobs', dest='jobs', default=jobs, type='int', help='amount of parallel jobs (%r)' % jobs) p('-k', '--keep', dest='keep', default=0, action='count', help='continue despite errors (-kk to try harder)') p('-v', '--verbose', dest='verbose', default=0, action='count', help='verbosity level -v -vv or -vvv [default: 0]') p('--zones', dest='zones', default='', action='store', help='debugging zones (task_gen, deps, tasks, etc)') p('--profile', dest='profile', default=0, action='store_true', help=optparse.SUPPRESS_HELP) p('--pdb', dest='pdb', default=0, action='store_true', help=optparse.SUPPRESS_HELP) p('-h', '--help', dest='whelp', default=0, action='store_true', help="show this help message and exit") gr = self.add_option_group('Configuration options') self.option_groups['configure options'] = gr gr.add_option('-o', '--out', action='store', default='', help='build dir for the project', dest='out') gr.add_option('-t', '--top', action='store', default='', help='src dir for the project', dest='top') gr.add_option('--no-lock-in-run', action='store_true', default='', help=optparse.SUPPRESS_HELP, dest='no_lock_in_run') gr.add_option('--no-lock-in-out', action='store_true', default='', help=optparse.SUPPRESS_HELP, dest='no_lock_in_out') gr.add_option('--no-lock-in-top', action='store_true', default='', help=optparse.SUPPRESS_HELP, dest='no_lock_in_top') default_prefix = getattr(Context.g_module, 'default_prefix', os.environ.get('PREFIX')) if not default_prefix: if Utils.unversioned_sys_platform() == 'win32': d = tempfile.gettempdir() default_prefix = d[0].upper() + d[1:] # win32 preserves the case, but gettempdir does not else: default_prefix = '/usr/local/' gr.add_option('--prefix', dest='prefix', default=default_prefix, help='installation prefix [default: %r]' % default_prefix) gr.add_option('--bindir', dest='bindir', help='bindir') gr.add_option('--libdir', dest='libdir', help='libdir') gr = self.add_option_group('Build and installation options') self.option_groups['build and install options'] = gr gr.add_option('-p', '--progress', dest='progress_bar', default=0, action='count', help= '-p: progress bar; -pp: ide output') gr.add_option('--targets', dest='targets', default='', action='store', help='task generators, e.g. "target1,target2"') gr = self.add_option_group('Step options') self.option_groups['step options'] = gr gr.add_option('--files', dest='files', default='', action='store', help='files to process, by regexp, e.g. 
"*/main.c,*/test/main.o"') default_destdir = os.environ.get('DESTDIR', '') gr = self.add_option_group('Installation and uninstallation options') self.option_groups['install/uninstall options'] = gr gr.add_option('--destdir', help='installation root [default: %r]' % default_destdir, default=default_destdir, dest='destdir') gr.add_option('-f', '--force', dest='force', default=False, action='store_true', help='force file installation') gr.add_option('--distcheck-args', metavar='ARGS', help='arguments to pass to distcheck', default=None, action='store') def jobs(self): """ Finds the optimal amount of cpu cores to use for parallel jobs. At runtime the options can be obtained from :py:const:`waflib.Options.options` :: from waflib.Options import options njobs = options.jobs :return: the amount of cpu cores :rtype: int """ count = int(os.environ.get('JOBS', 0)) if count < 1: if 'NUMBER_OF_PROCESSORS' in os.environ: # on Windows, use the NUMBER_OF_PROCESSORS environment variable count = int(os.environ.get('NUMBER_OF_PROCESSORS', 1)) else: # on everything else, first try the POSIX sysconf values if hasattr(os, 'sysconf_names'): if 'SC_NPROCESSORS_ONLN' in os.sysconf_names: count = int(os.sysconf('SC_NPROCESSORS_ONLN')) elif 'SC_NPROCESSORS_CONF' in os.sysconf_names: count = int(os.sysconf('SC_NPROCESSORS_CONF')) if not count and os.name not in ('nt', 'java'): try: tmp = self.cmd_and_log(['sysctl', '-n', 'hw.ncpu'], quiet=0) except Errors.WafError: pass else: if re.match('^[0-9]+$', tmp): count = int(tmp) if count < 1: count = 1 elif count > 1024: count = 1024 return count def add_option(self, *k, **kw): """ Wraps ``optparse.add_option``:: def options(ctx): ctx.add_option('-u', '--use', dest='use', default=False, action='store_true', help='a boolean option') :rtype: optparse option object """ return self.parser.add_option(*k, **kw) def add_option_group(self, *k, **kw): """ Wraps ``optparse.add_option_group``:: def options(ctx): gr = ctx.add_option_group('some options') gr.add_option('-u', '--use', dest='use', default=False, action='store_true') :rtype: optparse option group object """ try: gr = self.option_groups[k[0]] except KeyError: gr = self.parser.add_option_group(*k, **kw) self.option_groups[k[0]] = gr return gr def get_option_group(self, opt_str): """ Wraps ``optparse.get_option_group``:: def options(ctx): gr = ctx.get_option_group('configure options') gr.add_option('-o', '--out', action='store', default='', help='build dir for the project', dest='out') :rtype: optparse option group object """ try: return self.option_groups[opt_str] except KeyError: for group in self.parser.option_groups: if group.title == opt_str: return group return None def sanitize_path(self, path, cwd=None): if not cwd: cwd = Context.launch_dir p = os.path.expanduser(path) p = os.path.join(cwd, p) p = os.path.normpath(p) p = os.path.abspath(p) return p def parse_cmd_args(self, _args=None, cwd=None, allow_unknown=False): """ Just parse the arguments """ self.parser.allow_unknown = allow_unknown (options, leftover_args) = self.parser.parse_args(args=_args) envvars = [] commands = [] for arg in leftover_args: if '=' in arg: envvars.append(arg) elif arg != 'options': commands.append(arg) for name in 'top out destdir prefix bindir libdir'.split(): # those paths are usually expanded from Context.launch_dir if getattr(options, name, None): path = self.sanitize_path(getattr(options, name), cwd) setattr(options, name, path) return options, commands, envvars def init_module_vars(self, arg_options, arg_commands, arg_envvars): 
options.__dict__.clear() del commands[:] del envvars[:] options.__dict__.update(arg_options.__dict__) commands.extend(arg_commands) envvars.extend(arg_envvars) for var in envvars: (name, value) = var.split('=', 1) os.environ[name.strip()] = value def init_logs(self, options, commands, envvars): Logs.verbose = options.verbose if options.verbose >= 1: self.load('errcheck') colors = {'yes' : 2, 'auto' : 1, 'no' : 0}[options.colors] Logs.enable_colors(colors) if options.zones: Logs.zones = options.zones.split(',') if not Logs.verbose: Logs.verbose = 1 elif Logs.verbose > 0: Logs.zones = ['runner'] if Logs.verbose > 2: Logs.zones = ['*'] def parse_args(self, _args=None): """ Parses arguments from a list which is not necessarily the command-line. Initializes the module variables options, commands and envvars If help is requested, prints it and exit the application :param _args: arguments :type _args: list of strings """ options, commands, envvars = self.parse_cmd_args() self.init_logs(options, commands, envvars) self.init_module_vars(options, commands, envvars) def execute(self): """ See :py:func:`waflib.Context.Context.execute` """ super(OptionsContext, self).execute() self.parse_args() Utils.alloc_process_pool(options.jobs) tdb-1.4.2/third_party/waf/waflib/Runner.py0000660000000000000000000004001013527011455020416 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ Runner.py: Task scheduling and execution """ import heapq, traceback try: from queue import Queue, PriorityQueue except ImportError: from Queue import Queue try: from Queue import PriorityQueue except ImportError: class PriorityQueue(Queue): def _init(self, maxsize): self.maxsize = maxsize self.queue = [] def _put(self, item): heapq.heappush(self.queue, item) def _get(self): return heapq.heappop(self.queue) from waflib import Utils, Task, Errors, Logs GAP = 5 """ Wait for at least ``GAP * njobs`` before trying to enqueue more tasks to run """ class PriorityTasks(object): def __init__(self): self.lst = [] def __len__(self): return len(self.lst) def __iter__(self): return iter(self.lst) def __str__(self): return 'PriorityTasks: [%s]' % '\n '.join(str(x) for x in self.lst) def clear(self): self.lst = [] def append(self, task): heapq.heappush(self.lst, task) def appendleft(self, task): "Deprecated, do not use" heapq.heappush(self.lst, task) def pop(self): return heapq.heappop(self.lst) def extend(self, lst): if self.lst: for x in lst: self.append(x) else: if isinstance(lst, list): self.lst = lst heapq.heapify(lst) else: self.lst = lst.lst class Consumer(Utils.threading.Thread): """ Daemon thread object that executes a task. It shares a semaphore with the coordinator :py:class:`waflib.Runner.Spawner`. There is one instance per task to consume. """ def __init__(self, spawner, task): Utils.threading.Thread.__init__(self) self.task = task """Task to execute""" self.spawner = spawner """Coordinator object""" self.setDaemon(1) self.start() def run(self): """ Processes a single task """ try: if not self.spawner.master.stop: self.spawner.master.process_task(self.task) finally: self.spawner.sem.release() self.spawner.master.out.put(self.task) self.task = None self.spawner = None class Spawner(Utils.threading.Thread): """ Daemon thread that consumes tasks from :py:class:`waflib.Runner.Parallel` producer and spawns a consuming thread :py:class:`waflib.Runner.Consumer` for each :py:class:`waflib.Task.Task` instance. 
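# Illustration (not part of waflib): the coordination pattern used by Spawner and
# Consumer above, reduced to its core: a daemon coordinator takes work from a
# queue while a bounded semaphore caps the number of concurrent workers. A hedged
# sketch only, with hypothetical names.
import threading

def _example_spawner(work_queue, njobs, handle):
	sem = threading.Semaphore(njobs)   # caps concurrent workers, like Spawner.sem
	def loop():
		while 1:
			item = work_queue.get()
			if item is None:           # sentinel, like the None enqueued by Parallel.start()
				break
			sem.acquire()
			def run(task=item):
				try:
					handle(task)
				finally:
					sem.release()      # always released, mirroring Consumer.run()
			worker = threading.Thread(target=run)
			worker.daemon = True
			worker.start()
	coordinator = threading.Thread(target=loop)
	coordinator.daemon = True
	coordinator.start()
	return coordinator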
""" def __init__(self, master): Utils.threading.Thread.__init__(self) self.master = master """:py:class:`waflib.Runner.Parallel` producer instance""" self.sem = Utils.threading.Semaphore(master.numjobs) """Bounded semaphore that prevents spawning more than *n* concurrent consumers""" self.setDaemon(1) self.start() def run(self): """ Spawns new consumers to execute tasks by delegating to :py:meth:`waflib.Runner.Spawner.loop` """ try: self.loop() except Exception: # Python 2 prints unnecessary messages when shutting down # we also want to stop the thread properly pass def loop(self): """ Consumes task objects from the producer; ends when the producer has no more task to provide. """ master = self.master while 1: task = master.ready.get() self.sem.acquire() if not master.stop: task.log_display(task.generator.bld) Consumer(self, task) class Parallel(object): """ Schedule the tasks obtained from the build context for execution. """ def __init__(self, bld, j=2): """ The initialization requires a build context reference for computing the total number of jobs. """ self.numjobs = j """ Amount of parallel consumers to use """ self.bld = bld """ Instance of :py:class:`waflib.Build.BuildContext` """ self.outstanding = PriorityTasks() """Heap of :py:class:`waflib.Task.Task` that may be ready to be executed""" self.postponed = PriorityTasks() """Heap of :py:class:`waflib.Task.Task` which are not ready to run for non-DAG reasons""" self.incomplete = set() """List of :py:class:`waflib.Task.Task` waiting for dependent tasks to complete (DAG)""" self.ready = PriorityQueue(0) """List of :py:class:`waflib.Task.Task` ready to be executed by consumers""" self.out = Queue(0) """List of :py:class:`waflib.Task.Task` returned by the task consumers""" self.count = 0 """Amount of tasks that may be processed by :py:class:`waflib.Runner.TaskConsumer`""" self.processed = 0 """Amount of tasks processed""" self.stop = False """Error flag to stop the build""" self.error = [] """Tasks that could not be executed""" self.biter = None """Task iterator which must give groups of parallelizable tasks when calling ``next()``""" self.dirty = False """ Flag that indicates that the build cache must be saved when a task was executed (calls :py:meth:`waflib.Build.BuildContext.store`)""" self.revdeps = Utils.defaultdict(set) """ The reverse dependency graph of dependencies obtained from Task.run_after """ self.spawner = None """ Coordinating daemon thread that spawns thread consumers """ if self.numjobs > 1: self.spawner = Spawner(self) def get_next_task(self): """ Obtains the next Task instance to run :rtype: :py:class:`waflib.Task.Task` """ if not self.outstanding: return None return self.outstanding.pop() def postpone(self, tsk): """ Adds the task to the list :py:attr:`waflib.Runner.Parallel.postponed`. The order is scrambled so as to consume as many tasks in parallel as possible. :param tsk: task instance :type tsk: :py:class:`waflib.Task.Task` """ self.postponed.append(tsk) def refill_task_list(self): """ Pulls a next group of tasks to execute in :py:attr:`waflib.Runner.Parallel.outstanding`. Ensures that all tasks in the current build group are complete before processing the next one. 
""" while self.count > self.numjobs * GAP: self.get_out() while not self.outstanding: if self.count: self.get_out() if self.outstanding: break elif self.postponed: try: cond = self.deadlock == self.processed except AttributeError: pass else: if cond: # The most common reason is conflicting build order declaration # for example: "X run_after Y" and "Y run_after X" # Another can be changing "run_after" dependencies while the build is running # for example: updating "tsk.run_after" in the "runnable_status" method lst = [] for tsk in self.postponed: deps = [id(x) for x in tsk.run_after if not x.hasrun] lst.append('%s\t-> %r' % (repr(tsk), deps)) if not deps: lst.append('\n task %r dependencies are done, check its *runnable_status*?' % id(tsk)) raise Errors.WafError('Deadlock detected: check the task build order%s' % ''.join(lst)) self.deadlock = self.processed if self.postponed: self.outstanding.extend(self.postponed) self.postponed.clear() elif not self.count: if self.incomplete: for x in self.incomplete: for k in x.run_after: if not k.hasrun: break else: # dependency added after the build started without updating revdeps self.incomplete.remove(x) self.outstanding.append(x) break else: if self.stop or self.error: break raise Errors.WafError('Broken revdeps detected on %r' % self.incomplete) else: tasks = next(self.biter) ready, waiting = self.prio_and_split(tasks) self.outstanding.extend(ready) self.incomplete.update(waiting) self.total = self.bld.total() break def add_more_tasks(self, tsk): """ If a task provides :py:attr:`waflib.Task.Task.more_tasks`, then the tasks contained in that list are added to the current build and will be processed before the next build group. The priorities for dependent tasks are not re-calculated globally :param tsk: task instance :type tsk: :py:attr:`waflib.Task.Task` """ if getattr(tsk, 'more_tasks', None): more = set(tsk.more_tasks) groups_done = set() def iteri(a, b): for x in a: yield x for x in b: yield x # Update the dependency tree # this assumes that task.run_after values were updated for x in iteri(self.outstanding, self.incomplete): for k in x.run_after: if isinstance(k, Task.TaskGroup): if k not in groups_done: groups_done.add(k) for j in k.prev & more: self.revdeps[j].add(k) elif k in more: self.revdeps[k].add(x) ready, waiting = self.prio_and_split(tsk.more_tasks) self.outstanding.extend(ready) self.incomplete.update(waiting) self.total += len(tsk.more_tasks) def mark_finished(self, tsk): def try_unfreeze(x): # DAG ancestors are likely to be in the incomplete set # This assumes that the run_after contents have not changed # after the build starts, else a deadlock may occur if x in self.incomplete: # TODO remove dependencies to free some memory? # x.run_after.remove(tsk) for k in x.run_after: if not k.hasrun: break else: self.incomplete.remove(x) self.outstanding.append(x) if tsk in self.revdeps: for x in self.revdeps[tsk]: if isinstance(x, Task.TaskGroup): x.prev.remove(tsk) if not x.prev: for k in x.next: # TODO necessary optimization? k.run_after.remove(x) try_unfreeze(k) # TODO necessary optimization? x.next = [] else: try_unfreeze(x) del self.revdeps[tsk] if hasattr(tsk, 'semaphore'): sem = tsk.semaphore try: sem.release(tsk) except KeyError: # TODO pass else: while sem.waiting and not sem.is_locked(): # take a frozen task, make it ready to run x = sem.waiting.pop() self._add_task(x) def get_out(self): """ Waits for a Task that task consumers add to :py:attr:`waflib.Runner.Parallel.out` after execution. 
Adds more Tasks if necessary through :py:attr:`waflib.Runner.Parallel.add_more_tasks`. :rtype: :py:attr:`waflib.Task.Task` """ tsk = self.out.get() if not self.stop: self.add_more_tasks(tsk) self.mark_finished(tsk) self.count -= 1 self.dirty = True return tsk def add_task(self, tsk): """ Enqueue a Task to :py:attr:`waflib.Runner.Parallel.ready` so that consumers can run them. :param tsk: task instance :type tsk: :py:attr:`waflib.Task.Task` """ # TODO change in waf 2.1 self.ready.put(tsk) def _add_task(self, tsk): if hasattr(tsk, 'semaphore'): sem = tsk.semaphore try: sem.acquire(tsk) except IndexError: sem.waiting.add(tsk) return self.count += 1 self.processed += 1 if self.numjobs == 1: tsk.log_display(tsk.generator.bld) try: self.process_task(tsk) finally: self.out.put(tsk) else: self.add_task(tsk) def process_task(self, tsk): """ Processes a task and attempts to stop the build in case of errors """ tsk.process() if tsk.hasrun != Task.SUCCESS: self.error_handler(tsk) def skip(self, tsk): """ Mark a task as skipped/up-to-date """ tsk.hasrun = Task.SKIPPED self.mark_finished(tsk) def cancel(self, tsk): """ Mark a task as failed because of unsatisfiable dependencies """ tsk.hasrun = Task.CANCELED self.mark_finished(tsk) def error_handler(self, tsk): """ Called when a task cannot be executed. The flag :py:attr:`waflib.Runner.Parallel.stop` is set, unless the build is executed with:: $ waf build -k :param tsk: task instance :type tsk: :py:attr:`waflib.Task.Task` """ if not self.bld.keep: self.stop = True self.error.append(tsk) def task_status(self, tsk): """ Obtains the task status to decide whether to run it immediately or not. :return: the exit status, for example :py:attr:`waflib.Task.ASK_LATER` :rtype: integer """ try: return tsk.runnable_status() except Exception: self.processed += 1 tsk.err_msg = traceback.format_exc() if not self.stop and self.bld.keep: self.skip(tsk) if self.bld.keep == 1: # if -k stop on the first exception, if -kk try to go as far as possible if Logs.verbose > 1 or not self.error: self.error.append(tsk) self.stop = True else: if Logs.verbose > 1: self.error.append(tsk) return Task.EXCEPTION tsk.hasrun = Task.EXCEPTION self.error_handler(tsk) return Task.EXCEPTION def start(self): """ Obtains Task instances from the BuildContext instance and adds the ones that need to be executed to :py:class:`waflib.Runner.Parallel.ready` so that the :py:class:`waflib.Runner.Spawner` consumer thread has them executed. Obtains the executed Tasks back from :py:class:`waflib.Runner.Parallel.out` and marks the build as failed by setting the ``stop`` flag. If only one job is used, then executes the tasks one by one, without consumers. 
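# Illustration (not part of waflib): the dispatch performed by Parallel.start()
# below, summarized by runnable_status() result; a simplified sketch of the real
# loop (which also handles counters, -k error collection and hasrun short-cuts).
from waflib import Task

def _example_dispatch(parallel, tsk):
	st = tsk.runnable_status()
	if st == Task.RUN_ME:
		parallel._add_task(tsk)   # queued for a consumer thread
	elif st == Task.ASK_LATER:
		parallel.postpone(tsk)    # dependencies not ready yet
	elif st == Task.SKIP_ME:
		parallel.skip(tsk)        # up to date
	elif st == Task.CANCEL_ME:
		parallel.cancel(tsk)      # unsatisfiable dependency (waf -k)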
""" self.total = self.bld.total() while not self.stop: self.refill_task_list() # consider the next task tsk = self.get_next_task() if not tsk: if self.count: # tasks may add new ones after they are run continue else: # no tasks to run, no tasks running, time to exit break if tsk.hasrun: # if the task is marked as "run", just skip it self.processed += 1 continue if self.stop: # stop immediately after a failure is detected break st = self.task_status(tsk) if st == Task.RUN_ME: self._add_task(tsk) elif st == Task.ASK_LATER: self.postpone(tsk) elif st == Task.SKIP_ME: self.processed += 1 self.skip(tsk) self.add_more_tasks(tsk) elif st == Task.CANCEL_ME: # A dependency problem has occurred, and the # build is most likely run with `waf -k` if Logs.verbose > 1: self.error.append(tsk) self.processed += 1 self.cancel(tsk) # self.count represents the tasks that have been made available to the consumer threads # collect all the tasks after an error else the message may be incomplete while self.error and self.count: self.get_out() self.ready.put(None) if not self.stop: assert not self.count assert not self.postponed assert not self.incomplete def prio_and_split(self, tasks): """ Label input tasks with priority values, and return a pair containing the tasks that are ready to run and the tasks that are necessarily waiting for other tasks to complete. The priority system is really meant as an optional layer for optimization: dependency cycles are found quickly, and builds should be more efficient. A high priority number means that a task is processed first. This method can be overridden to disable the priority system:: def prio_and_split(self, tasks): return tasks, [] :return: A pair of task lists :rtype: tuple """ # to disable: #return tasks, [] for x in tasks: x.visited = 0 reverse = self.revdeps groups_done = set() for x in tasks: for k in x.run_after: if isinstance(k, Task.TaskGroup): if k not in groups_done: groups_done.add(k) for j in k.prev: reverse[j].add(k) else: reverse[k].add(x) # the priority number is not the tree depth def visit(n): if isinstance(n, Task.TaskGroup): return sum(visit(k) for k in n.next) if n.visited == 0: n.visited = 1 if n in reverse: rev = reverse[n] n.prio_order = n.tree_weight + len(rev) + sum(visit(k) for k in rev) else: n.prio_order = n.tree_weight n.visited = 2 elif n.visited == 1: raise Errors.WafError('Dependency cycle found!') return n.prio_order for x in tasks: if x.visited != 0: # must visit all to detect cycles continue try: visit(x) except Errors.WafError: self.debug_cycles(tasks, reverse) ready = [] waiting = [] for x in tasks: for k in x.run_after: if not k.hasrun: waiting.append(x) break else: ready.append(x) return (ready, waiting) def debug_cycles(self, tasks, reverse): tmp = {} for x in tasks: tmp[x] = 0 def visit(n, acc): if isinstance(n, Task.TaskGroup): for k in n.next: visit(k, acc) return if tmp[n] == 0: tmp[n] = 1 for k in reverse.get(n, []): visit(k, [n] + acc) tmp[n] = 2 elif tmp[n] == 1: lst = [] for tsk in acc: lst.append(repr(tsk)) if tsk is n: # exclude prior nodes, we want the minimum cycle break raise Errors.WafError('Task dependency cycle in "run_after" constraints: %s' % ''.join(lst)) for x in tasks: visit(x, []) tdb-1.4.2/third_party/waf/waflib/Scripting.py0000660000000000000000000004000613527011455021114 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) "Module called for configuring, compiling and installing targets" from __future__ import with_statement import os, shlex, shutil, 
traceback, errno, sys, stat from waflib import Utils, Configure, Logs, Options, ConfigSet, Context, Errors, Build, Node build_dir_override = None no_climb_commands = ['configure'] default_cmd = "build" def waf_entry_point(current_directory, version, wafdir): """ This is the main entry point, all Waf execution starts here. :param current_directory: absolute path representing the current directory :type current_directory: string :param version: version number :type version: string :param wafdir: absolute path representing the directory of the waf library :type wafdir: string """ Logs.init_log() if Context.WAFVERSION != version: Logs.error('Waf script %r and library %r do not match (directory %r)', version, Context.WAFVERSION, wafdir) sys.exit(1) # Store current directory before any chdir Context.waf_dir = wafdir Context.run_dir = Context.launch_dir = current_directory start_dir = current_directory no_climb = os.environ.get('NOCLIMB') if len(sys.argv) > 1: # os.path.join handles absolute paths # if sys.argv[1] is not an absolute path, then it is relative to the current working directory potential_wscript = os.path.join(current_directory, sys.argv[1]) if os.path.basename(potential_wscript) == Context.WSCRIPT_FILE and os.path.isfile(potential_wscript): # need to explicitly normalize the path, as it may contain extra '/.' path = os.path.normpath(os.path.dirname(potential_wscript)) start_dir = os.path.abspath(path) no_climb = True sys.argv.pop(1) ctx = Context.create_context('options') (options, commands, env) = ctx.parse_cmd_args(allow_unknown=True) if options.top: start_dir = Context.run_dir = Context.top_dir = options.top no_climb = True if options.out: Context.out_dir = options.out # if 'configure' is in the commands, do not search any further if not no_climb: for k in no_climb_commands: for y in commands: if y.startswith(k): no_climb = True break # try to find a lock file (if the project was configured) # at the same time, store the first wscript file seen cur = start_dir while cur: try: lst = os.listdir(cur) except OSError: lst = [] Logs.error('Directory %r is unreadable!', cur) if Options.lockfile in lst: env = ConfigSet.ConfigSet() try: env.load(os.path.join(cur, Options.lockfile)) ino = os.stat(cur)[stat.ST_INO] except EnvironmentError: pass else: # check if the folder was not moved for x in (env.run_dir, env.top_dir, env.out_dir): if not x: continue if Utils.is_win32: if cur == x: load = True break else: # if the filesystem features symlinks, compare the inode numbers try: ino2 = os.stat(x)[stat.ST_INO] except OSError: pass else: if ino == ino2: load = True break else: Logs.warn('invalid lock file in %s', cur) load = False if load: Context.run_dir = env.run_dir Context.top_dir = env.top_dir Context.out_dir = env.out_dir break if not Context.run_dir: if Context.WSCRIPT_FILE in lst: Context.run_dir = cur next = os.path.dirname(cur) if next == cur: break cur = next if no_climb: break wscript = os.path.normpath(os.path.join(Context.run_dir, Context.WSCRIPT_FILE)) if not os.path.exists(wscript): if options.whelp: Logs.warn('These are the generic options (no wscript/project found)') ctx.parser.print_help() sys.exit(0) Logs.error('Waf: Run from a folder containing a %r file (or try -h for the generic options)', Context.WSCRIPT_FILE) sys.exit(1) try: os.chdir(Context.run_dir) except OSError: Logs.error('Waf: The folder %r is unreadable', Context.run_dir) sys.exit(1) try: set_main_module(wscript) except Errors.WafError as e: Logs.pprint('RED', e.verbose_msg) Logs.error(str(e)) sys.exit(1) except 
Exception as e: Logs.error('Waf: The wscript in %r is unreadable', Context.run_dir) traceback.print_exc(file=sys.stdout) sys.exit(2) if options.profile: import cProfile, pstats cProfile.runctx('from waflib import Scripting; Scripting.run_commands()', {}, {}, 'profi.txt') p = pstats.Stats('profi.txt') p.sort_stats('time').print_stats(75) # or 'cumulative' else: try: try: run_commands() except: if options.pdb: import pdb type, value, tb = sys.exc_info() traceback.print_exc() pdb.post_mortem(tb) else: raise except Errors.WafError as e: if Logs.verbose > 1: Logs.pprint('RED', e.verbose_msg) Logs.error(e.msg) sys.exit(1) except SystemExit: raise except Exception as e: traceback.print_exc(file=sys.stdout) sys.exit(2) except KeyboardInterrupt: Logs.pprint('RED', 'Interrupted') sys.exit(68) def set_main_module(file_path): """ Read the main wscript file into :py:const:`waflib.Context.Context.g_module` and bind default functions such as ``init``, ``dist``, ``distclean`` if not defined. Called by :py:func:`waflib.Scripting.waf_entry_point` during the initialization. :param file_path: absolute path representing the top-level wscript file :type file_path: string """ Context.g_module = Context.load_module(file_path) Context.g_module.root_path = file_path # note: to register the module globally, use the following: # sys.modules['wscript_main'] = g_module def set_def(obj): name = obj.__name__ if not name in Context.g_module.__dict__: setattr(Context.g_module, name, obj) for k in (dist, distclean, distcheck): set_def(k) # add dummy init and shutdown functions if they're not defined if not 'init' in Context.g_module.__dict__: Context.g_module.init = Utils.nada if not 'shutdown' in Context.g_module.__dict__: Context.g_module.shutdown = Utils.nada if not 'options' in Context.g_module.__dict__: Context.g_module.options = Utils.nada def parse_options(): """ Parses the command-line options and initialize the logging system. Called by :py:func:`waflib.Scripting.waf_entry_point` during the initialization. """ ctx = Context.create_context('options') ctx.execute() if not Options.commands: if isinstance(default_cmd, list): Options.commands.extend(default_cmd) else: Options.commands.append(default_cmd) if Options.options.whelp: ctx.parser.print_help() sys.exit(0) def run_command(cmd_name): """ Executes a single Waf command. Called by :py:func:`waflib.Scripting.run_commands`. :param cmd_name: command to execute, like ``build`` :type cmd_name: string """ ctx = Context.create_context(cmd_name) ctx.log_timer = Utils.Timer() ctx.options = Options.options # provided for convenience ctx.cmd = cmd_name try: ctx.execute() finally: # Issue 1374 ctx.finalize() return ctx def run_commands(): """ Execute the Waf commands that were given on the command-line, and the other options Called by :py:func:`waflib.Scripting.waf_entry_point` during the initialization, and executed after :py:func:`waflib.Scripting.parse_options`. 
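# Illustration (not part of waflib): what a command line like ``waf configure build``
# amounts to once parsed, per run_commands() below; a simplified sketch.
def _example_run(run_command):
	run_command('init')
	for cmd_name in ('configure', 'build'):
		run_command(cmd_name)  # creates the context, executes it, then finalizes it
	run_command('shutdown')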
""" parse_options() run_command('init') while Options.commands: cmd_name = Options.commands.pop(0) ctx = run_command(cmd_name) Logs.info('%r finished successfully (%s)', cmd_name, ctx.log_timer) run_command('shutdown') ########################################################################################### def distclean_dir(dirname): """ Distclean function called in the particular case when:: top == out :param dirname: absolute path of the folder to clean :type dirname: string """ for (root, dirs, files) in os.walk(dirname): for f in files: if f.endswith(('.o', '.moc', '.exe')): fname = os.path.join(root, f) try: os.remove(fname) except OSError: Logs.warn('Could not remove %r', fname) for x in (Context.DBFILE, 'config.log'): try: os.remove(x) except OSError: pass try: shutil.rmtree(Build.CACHE_DIR) except OSError: pass def distclean(ctx): '''removes build folders and data''' def remove_and_log(k, fun): try: fun(k) except EnvironmentError as e: if e.errno != errno.ENOENT: Logs.warn('Could not remove %r', k) # remove waf cache folders on the top-level if not Options.commands: for k in os.listdir('.'): for x in '.waf-2 waf-2 .waf3-2 waf3-2'.split(): if k.startswith(x): remove_and_log(k, shutil.rmtree) # remove a build folder, if any cur = '.' if ctx.options.no_lock_in_top: cur = ctx.options.out try: lst = os.listdir(cur) except OSError: Logs.warn('Could not read %r', cur) return if Options.lockfile in lst: f = os.path.join(cur, Options.lockfile) try: env = ConfigSet.ConfigSet(f) except EnvironmentError: Logs.warn('Could not read %r', f) return if not env.out_dir or not env.top_dir: Logs.warn('Invalid lock file %r', f) return if env.out_dir == env.top_dir: distclean_dir(env.out_dir) else: remove_and_log(env.out_dir, shutil.rmtree) for k in (env.out_dir, env.top_dir, env.run_dir): p = os.path.join(k, Options.lockfile) remove_and_log(p, os.remove) class Dist(Context.Context): '''creates an archive containing the project source code''' cmd = 'dist' fun = 'dist' algo = 'tar.bz2' ext_algo = {} def execute(self): """ See :py:func:`waflib.Context.Context.execute` """ self.recurse([os.path.dirname(Context.g_module.root_path)]) self.archive() def archive(self): """ Creates the source archive. """ import tarfile arch_name = self.get_arch_name() try: self.base_path except AttributeError: self.base_path = self.path node = self.base_path.make_node(arch_name) try: node.delete() except OSError: pass files = self.get_files() if self.algo.startswith('tar.'): tar = tarfile.open(node.abspath(), 'w:' + self.algo.replace('tar.', '')) for x in files: self.add_tar_file(x, tar) tar.close() elif self.algo == 'zip': import zipfile zip = zipfile.ZipFile(node.abspath(), 'w', compression=zipfile.ZIP_DEFLATED) for x in files: archive_name = self.get_base_name() + '/' + x.path_from(self.base_path) zip.write(x.abspath(), archive_name, zipfile.ZIP_DEFLATED) zip.close() else: self.fatal('Valid algo types are tar.bz2, tar.gz, tar.xz or zip') try: from hashlib import sha256 except ImportError: digest = '' else: digest = ' (sha256=%r)' % sha256(node.read(flags='rb')).hexdigest() Logs.info('New archive created: %s%s', self.arch_name, digest) def get_tar_path(self, node): """ Return the path to use for a node in the tar archive, the purpose of this is to let subclases resolve symbolic links or to change file names :return: absolute path :rtype: string """ return node.abspath() def add_tar_file(self, x, tar): """ Adds a file to the tar archive. Symlinks are not verified. 
:param x: file path :param tar: tar file object """ p = self.get_tar_path(x) tinfo = tar.gettarinfo(name=p, arcname=self.get_tar_prefix() + '/' + x.path_from(self.base_path)) tinfo.uid = 0 tinfo.gid = 0 tinfo.uname = 'root' tinfo.gname = 'root' if os.path.isfile(p): with open(p, 'rb') as f: tar.addfile(tinfo, fileobj=f) else: tar.addfile(tinfo) def get_tar_prefix(self): """ Returns the base path for files added into the archive tar file :rtype: string """ try: return self.tar_prefix except AttributeError: return self.get_base_name() def get_arch_name(self): """ Returns the archive file name. Set the attribute *arch_name* to change the default value:: def dist(ctx): ctx.arch_name = 'ctx.tar.bz2' :rtype: string """ try: self.arch_name except AttributeError: self.arch_name = self.get_base_name() + '.' + self.ext_algo.get(self.algo, self.algo) return self.arch_name def get_base_name(self): """ Returns the default name of the main directory in the archive, which is set to *appname-version*. Set the attribute *base_name* to change the default value:: def dist(ctx): ctx.base_name = 'files' :rtype: string """ try: self.base_name except AttributeError: appname = getattr(Context.g_module, Context.APPNAME, 'noname') version = getattr(Context.g_module, Context.VERSION, '1.0') self.base_name = appname + '-' + version return self.base_name def get_excl(self): """ Returns the patterns to exclude for finding the files in the top-level directory. Set the attribute *excl* to change the default value:: def dist(ctx): ctx.excl = 'build **/*.o **/*.class' :rtype: string """ try: return self.excl except AttributeError: self.excl = Node.exclude_regs + ' **/waf-2.* **/.waf-2.* **/waf3-2.* **/.waf3-2.* **/*~ **/*.rej **/*.orig **/*.pyc **/*.pyo **/*.bak **/*.swp **/.lock-w*' if Context.out_dir: nd = self.root.find_node(Context.out_dir) if nd: self.excl += ' ' + nd.path_from(self.base_path) return self.excl def get_files(self): """ Files to package are searched automatically by :py:func:`waflib.Node.Node.ant_glob`. 
Set *files* to prevent this behaviour:: def dist(ctx): ctx.files = ctx.path.find_node('wscript') Files are also searched from the directory 'base_path', to change it, set:: def dist(ctx): ctx.base_path = path :rtype: list of :py:class:`waflib.Node.Node` """ try: files = self.files except AttributeError: files = self.base_path.ant_glob('**/*', excl=self.get_excl()) return files def dist(ctx): '''makes a tarball for redistributing the sources''' pass class DistCheck(Dist): """creates an archive with dist, then tries to build it""" fun = 'distcheck' cmd = 'distcheck' def execute(self): """ See :py:func:`waflib.Context.Context.execute` """ self.recurse([os.path.dirname(Context.g_module.root_path)]) self.archive() self.check() def make_distcheck_cmd(self, tmpdir): cfg = [] if Options.options.distcheck_args: cfg = shlex.split(Options.options.distcheck_args) else: cfg = [x for x in sys.argv if x.startswith('-')] cmd = [sys.executable, sys.argv[0], 'configure', 'build', 'install', 'uninstall', '--destdir=' + tmpdir] + cfg return cmd def check(self): """ Creates the archive, uncompresses it and tries to build the project """ import tempfile, tarfile with tarfile.open(self.get_arch_name()) as t: for x in t: t.extract(x) instdir = tempfile.mkdtemp('.inst', self.get_base_name()) cmd = self.make_distcheck_cmd(instdir) ret = Utils.subprocess.Popen(cmd, cwd=self.get_base_name()).wait() if ret: raise Errors.WafError('distcheck failed with code %r' % ret) if os.path.exists(instdir): raise Errors.WafError('distcheck succeeded, but files were left in %s' % instdir) shutil.rmtree(self.get_base_name()) def distcheck(ctx): '''checks if the project compiles (tarball from 'dist')''' pass def autoconfigure(execute_method): """ Decorator that enables context commands to run *configure* as needed. """ def execute(self): """ Wraps :py:func:`waflib.Context.Context.execute` on the context class """ if not Configure.autoconfig: return execute_method(self) env = ConfigSet.ConfigSet() do_config = False try: env.load(os.path.join(Context.top_dir, Options.lockfile)) except EnvironmentError: Logs.warn('Configuring the project') do_config = True else: if env.run_dir != Context.run_dir: do_config = True else: h = 0 for f in env.files: try: h = Utils.h_list((h, Utils.readf(f, 'rb'))) except EnvironmentError: do_config = True break else: do_config = h != env.hash if do_config: cmd = env.config_cmd or 'configure' if Configure.autoconfig == 'clobber': tmp = Options.options.__dict__ launch_dir_tmp = Context.launch_dir if env.options: Options.options.__dict__ = env.options Context.launch_dir = env.launch_dir try: run_command(cmd) finally: Options.options.__dict__ = tmp Context.launch_dir = launch_dir_tmp else: run_command(cmd) run_command(self.cmd) else: return execute_method(self) return execute Build.BuildContext.execute = autoconfigure(Build.BuildContext.execute) tdb-1.4.2/third_party/waf/waflib/Task.py0000660000000000000000000011507613527011455020066 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ Tasks represent atomic operations such as processes. 
""" import os, re, sys, tempfile, traceback from waflib import Utils, Logs, Errors # task states NOT_RUN = 0 """The task was not executed yet""" MISSING = 1 """The task has been executed but the files have not been created""" CRASHED = 2 """The task execution returned a non-zero exit status""" EXCEPTION = 3 """An exception occurred in the task execution""" CANCELED = 4 """A dependency for the task is missing so it was cancelled""" SKIPPED = 8 """The task did not have to be executed""" SUCCESS = 9 """The task was successfully executed""" ASK_LATER = -1 """The task is not ready to be executed""" SKIP_ME = -2 """The task does not need to be executed""" RUN_ME = -3 """The task must be executed""" CANCEL_ME = -4 """The task cannot be executed because of a dependency problem""" COMPILE_TEMPLATE_SHELL = ''' def f(tsk): env = tsk.env gen = tsk.generator bld = gen.bld cwdx = tsk.get_cwd() p = env.get_flat def to_list(xx): if isinstance(xx, str): return [xx] return xx tsk.last_cmd = cmd = \'\'\' %s \'\'\' % s return tsk.exec_command(cmd, cwd=cwdx, env=env.env or None) ''' COMPILE_TEMPLATE_NOSHELL = ''' def f(tsk): env = tsk.env gen = tsk.generator bld = gen.bld cwdx = tsk.get_cwd() def to_list(xx): if isinstance(xx, str): return [xx] return xx def merge(lst1, lst2): if lst1 and lst2: return lst1[:-1] + [lst1[-1] + lst2[0]] + lst2[1:] return lst1 + lst2 lst = [] %s if '' in lst: lst = [x for x in lst if x] tsk.last_cmd = lst return tsk.exec_command(lst, cwd=cwdx, env=env.env or None) ''' COMPILE_TEMPLATE_SIG_VARS = ''' def f(tsk): sig = tsk.generator.bld.hash_env_vars(tsk.env, tsk.vars) tsk.m.update(sig) env = tsk.env gen = tsk.generator bld = gen.bld cwdx = tsk.get_cwd() p = env.get_flat buf = [] %s tsk.m.update(repr(buf).encode()) ''' classes = {} """ The metaclass :py:class:`waflib.Task.store_task_type` stores all class tasks created by user scripts or Waf tools to this dict. It maps class names to class objects. """ class store_task_type(type): """ Metaclass: store the task classes into the dict pointed by the class attribute 'register' which defaults to :py:const:`waflib.Task.classes`, The attribute 'run_str' is compiled into a method 'run' bound to the task class. """ def __init__(cls, name, bases, dict): super(store_task_type, cls).__init__(name, bases, dict) name = cls.__name__ if name != 'evil' and name != 'Task': if getattr(cls, 'run_str', None): # if a string is provided, convert it to a method (f, dvars) = compile_fun(cls.run_str, cls.shell) cls.hcode = Utils.h_cmd(cls.run_str) cls.orig_run_str = cls.run_str # change the name of run_str or it is impossible to subclass with a function cls.run_str = None cls.run = f # process variables cls.vars = list(set(cls.vars + dvars)) cls.vars.sort() if cls.vars: fun = compile_sig_vars(cls.vars) if fun: cls.sig_vars = fun elif getattr(cls, 'run', None) and not 'hcode' in cls.__dict__: # getattr(cls, 'hcode') would look in the upper classes cls.hcode = Utils.h_cmd(cls.run) # be creative getattr(cls, 'register', classes)[name] = cls evil = store_task_type('evil', (object,), {}) "Base class provided to avoid writing a metaclass, so the code can run in python 2.6 and 3.x unmodified" class Task(evil): """ Task objects represents actions to perform such as commands to execute by calling the `run` method. Detecting when to execute a task occurs in the method :py:meth:`waflib.Task.Task.runnable_status`. Detecting which tasks to execute is performed through a hash value returned by :py:meth:`waflib.Task.Task.signature`. 
The task signature is persistent from build to build. """ vars = [] """ConfigSet variables that should trigger a rebuild (class attribute used for :py:meth:`waflib.Task.Task.sig_vars`)""" always_run = False """Specify whether task instances must always be executed or not (class attribute)""" shell = False """Execute the command with the shell (class attribute)""" color = 'GREEN' """Color for the console display, see :py:const:`waflib.Logs.colors_lst`""" ext_in = [] """File extensions that objects of this task class may use""" ext_out = [] """File extensions that objects of this task class may create""" before = [] """The instances of this class are executed before the instances of classes whose names are in this list""" after = [] """The instances of this class are executed after the instances of classes whose names are in this list""" hcode = Utils.SIG_NIL """String representing an additional hash for the class representation""" keep_last_cmd = False """Whether to keep the last command executed on the instance after execution. This may be useful for certain extensions but it can a lot of memory. """ weight = 0 """Optional weight to tune the priority for task instances. The higher, the earlier. The weight only applies to single task objects.""" tree_weight = 0 """Optional weight to tune the priority of task instances and whole subtrees. The higher, the earlier.""" prio_order = 0 """Priority order set by the scheduler on instances during the build phase. You most likely do not need to set it. """ __slots__ = ('hasrun', 'generator', 'env', 'inputs', 'outputs', 'dep_nodes', 'run_after') def __init__(self, *k, **kw): self.hasrun = NOT_RUN try: self.generator = kw['generator'] except KeyError: self.generator = self self.env = kw['env'] """:py:class:`waflib.ConfigSet.ConfigSet` object (make sure to provide one)""" self.inputs = [] """List of input nodes, which represent the files used by the task instance""" self.outputs = [] """List of output nodes, which represent the files created by the task instance""" self.dep_nodes = [] """List of additional nodes to depend on""" self.run_after = set() """Set of tasks that must be executed before this one""" def __lt__(self, other): return self.priority() > other.priority() def __le__(self, other): return self.priority() >= other.priority() def __gt__(self, other): return self.priority() < other.priority() def __ge__(self, other): return self.priority() <= other.priority() def get_cwd(self): """ :return: current working directory :rtype: :py:class:`waflib.Node.Node` """ bld = self.generator.bld ret = getattr(self, 'cwd', None) or getattr(bld, 'cwd', bld.bldnode) if isinstance(ret, str): if os.path.isabs(ret): ret = bld.root.make_node(ret) else: ret = self.generator.path.make_node(ret) return ret def quote_flag(self, x): """ Surround a process argument by quotes so that a list of arguments can be written to a file :param x: flag :type x: string :return: quoted flag :rtype: string """ old = x if '\\' in x: x = x.replace('\\', '\\\\') if '"' in x: x = x.replace('"', '\\"') if old != x or ' ' in x or '\t' in x or "'" in x: x = '"%s"' % x return x def priority(self): """ Priority of execution; the higher, the earlier :return: the priority value :rtype: a tuple of numeric values """ return (self.weight + self.prio_order, - getattr(self.generator, 'tg_idx_count', 0)) def split_argfile(self, cmd): """ Splits a list of process commands into the executable part and its list of arguments :return: a tuple containing the executable first and then the rest of arguments 
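For example (an illustrative command line; unproblematic flags pass through
:py:meth:`waflib.Task.Task.quote_flag` unchanged)::

    # ['gcc', '-c', 'main.c'] -> (['gcc'], ['-c', 'main.c'])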
:rtype: tuple """ return ([cmd[0]], [self.quote_flag(x) for x in cmd[1:]]) def exec_command(self, cmd, **kw): """ Wrapper for :py:meth:`waflib.Context.Context.exec_command`. This version set the current working directory (``build.variant_dir``), applies PATH settings (if self.env.PATH is provided), and can run long commands through a temporary ``@argfile``. :param cmd: process command to execute :type cmd: list of string (best) or string (process will use a shell) :return: the return code :rtype: int Optional parameters: #. cwd: current working directory (Node or string) #. stdout: set to None to prevent waf from capturing the process standard output #. stderr: set to None to prevent waf from capturing the process standard error #. timeout: timeout value (Python 3) """ if not 'cwd' in kw: kw['cwd'] = self.get_cwd() if hasattr(self, 'timeout'): kw['timeout'] = self.timeout if self.env.PATH: env = kw['env'] = dict(kw.get('env') or self.env.env or os.environ) env['PATH'] = self.env.PATH if isinstance(self.env.PATH, str) else os.pathsep.join(self.env.PATH) if hasattr(self, 'stdout'): kw['stdout'] = self.stdout if hasattr(self, 'stderr'): kw['stderr'] = self.stderr if not isinstance(cmd, str): if Utils.is_win32: # win32 compares the resulting length http://support.microsoft.com/kb/830473 too_long = sum([len(arg) for arg in cmd]) + len(cmd) > 8192 else: # non-win32 counts the amount of arguments (200k) too_long = len(cmd) > 200000 if too_long and getattr(self, 'allow_argsfile', True): # Shunt arguments to a temporary file if the command is too long. cmd, args = self.split_argfile(cmd) try: (fd, tmp) = tempfile.mkstemp() os.write(fd, '\r\n'.join(args).encode()) os.close(fd) if Logs.verbose: Logs.debug('argfile: @%r -> %r', tmp, args) return self.generator.bld.exec_command(cmd + ['@' + tmp], **kw) finally: try: os.remove(tmp) except OSError: # anti-virus and indexers can keep files open -_- pass return self.generator.bld.exec_command(cmd, **kw) def process(self): """ Runs the task and handles errors :return: 0 or None if everything is fine :rtype: integer """ # remove the task signature immediately before it is executed # so that the task will be executed again in case of failure try: del self.generator.bld.task_sigs[self.uid()] except KeyError: pass try: ret = self.run() except Exception: self.err_msg = traceback.format_exc() self.hasrun = EXCEPTION else: if ret: self.err_code = ret self.hasrun = CRASHED else: try: self.post_run() except Errors.WafError: pass except Exception: self.err_msg = traceback.format_exc() self.hasrun = EXCEPTION else: self.hasrun = SUCCESS if self.hasrun != SUCCESS and self.scan: # rescan dependencies on next run try: del self.generator.bld.imp_sigs[self.uid()] except KeyError: pass def log_display(self, bld): "Writes the execution status on the context logger" if self.generator.bld.progress_bar == 3: return s = self.display() if s: if bld.logger: logger = bld.logger else: logger = Logs if self.generator.bld.progress_bar == 1: c1 = Logs.colors.cursor_off c2 = Logs.colors.cursor_on logger.info(s, extra={'stream': sys.stderr, 'terminator':'', 'c1': c1, 'c2' : c2}) else: logger.info(s, extra={'terminator':'', 'c1': '', 'c2' : ''}) def display(self): """ Returns an execution status for the console, the progress bar, or the IDE output. 
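With the default progress mode the returned string resembles the following
(colors omitted, counter width and path illustrative)::

    [ 5/20] Compiling src/main.c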
:rtype: string """ col1 = Logs.colors(self.color) col2 = Logs.colors.NORMAL master = self.generator.bld.producer def cur(): # the current task position, computed as late as possible return master.processed - master.ready.qsize() if self.generator.bld.progress_bar == 1: return self.generator.bld.progress_line(cur(), master.total, col1, col2) if self.generator.bld.progress_bar == 2: ela = str(self.generator.bld.timer) try: ins = ','.join([n.name for n in self.inputs]) except AttributeError: ins = '' try: outs = ','.join([n.name for n in self.outputs]) except AttributeError: outs = '' return '|Total %s|Current %s|Inputs %s|Outputs %s|Time %s|\n' % (master.total, cur(), ins, outs, ela) s = str(self) if not s: return None total = master.total n = len(str(total)) fs = '[%%%dd/%%%dd] %%s%%s%%s%%s\n' % (n, n) kw = self.keyword() if kw: kw += ' ' return fs % (cur(), total, kw, col1, s, col2) def hash_constraints(self): """ Identifies a task type for all the constraints relevant for the scheduler: precedence, file production :return: a hash value :rtype: string """ return (tuple(self.before), tuple(self.after), tuple(self.ext_in), tuple(self.ext_out), self.__class__.__name__, self.hcode) def format_error(self): """ Returns an error message to display the build failure reasons :rtype: string """ if Logs.verbose: msg = ': %r\n%r' % (self, getattr(self, 'last_cmd', '')) else: msg = ' (run with -v to display more information)' name = getattr(self.generator, 'name', '') if getattr(self, "err_msg", None): return self.err_msg elif not self.hasrun: return 'task in %r was not executed for some reason: %r' % (name, self) elif self.hasrun == CRASHED: try: return ' -> task in %r failed with exit status %r%s' % (name, self.err_code, msg) except AttributeError: return ' -> task in %r failed%s' % (name, msg) elif self.hasrun == MISSING: return ' -> missing files in %r%s' % (name, msg) elif self.hasrun == CANCELED: return ' -> %r canceled because of missing dependencies' % name else: return 'invalid status for task in %r: %r' % (name, self.hasrun) def colon(self, var1, var2): """ Enable scriptlet expressions of the form ${FOO_ST:FOO} If the first variable (FOO_ST) is empty, then an empty list is returned The results will be slightly different if FOO_ST is a list, for example:: env.FOO = ['p1', 'p2'] env.FOO_ST = '-I%s' # ${FOO_ST:FOO} returns ['-Ip1', '-Ip2'] env.FOO_ST = ['-a', '-b'] # ${FOO_ST:FOO} returns ['-a', '-b', 'p1', '-a', '-b', 'p2'] """ tmp = self.env[var1] if not tmp: return [] if isinstance(var2, str): it = self.env[var2] else: it = var2 if isinstance(tmp, str): return [tmp % x for x in it] else: lst = [] for y in it: lst.extend(tmp) lst.append(y) return lst def __str__(self): "string to display to the user" name = self.__class__.__name__ if self.outputs: if name.endswith(('lib', 'program')) or not self.inputs: node = self.outputs[0] return node.path_from(node.ctx.launch_node()) if not (self.inputs or self.outputs): return self.__class__.__name__ if len(self.inputs) == 1: node = self.inputs[0] return node.path_from(node.ctx.launch_node()) src_str = ' '.join([a.path_from(a.ctx.launch_node()) for a in self.inputs]) tgt_str = ' '.join([a.path_from(a.ctx.launch_node()) for a in self.outputs]) if self.outputs: sep = ' -> ' else: sep = '' return '%s: %s%s%s' % (self.__class__.__name__, src_str, sep, tgt_str) def keyword(self): "Display keyword used to prettify the console outputs" name = self.__class__.__name__ if name.endswith(('lib', 'program')): return 'Linking' if len(self.inputs) == 1 and 
len(self.outputs) == 1: return 'Compiling' if not self.inputs: if self.outputs: return 'Creating' else: return 'Running' return 'Processing' def __repr__(self): "for debugging purposes" try: ins = ",".join([x.name for x in self.inputs]) outs = ",".join([x.name for x in self.outputs]) except AttributeError: ins = ",".join([str(x) for x in self.inputs]) outs = ",".join([str(x) for x in self.outputs]) return "".join(['\n\t{task %r: ' % id(self), self.__class__.__name__, " ", ins, " -> ", outs, '}']) def uid(self): """ Returns an identifier used to determine if tasks are up-to-date. Since the identifier will be stored between executions, it must be: - unique for a task: no two tasks return the same value (for a given build context) - the same for a given task instance By default, the node paths, the class name, and the function are used as inputs to compute a hash. The pointer to the object (python built-in 'id') will change between build executions, and must be avoided in such hashes. :return: hash value :rtype: string """ try: return self.uid_ except AttributeError: m = Utils.md5(self.__class__.__name__) up = m.update for x in self.inputs + self.outputs: up(x.abspath()) self.uid_ = m.digest() return self.uid_ def set_inputs(self, inp): """ Appends the nodes to the *inputs* list :param inp: input nodes :type inp: node or list of nodes """ if isinstance(inp, list): self.inputs += inp else: self.inputs.append(inp) def set_outputs(self, out): """ Appends the nodes to the *outputs* list :param out: output nodes :type out: node or list of nodes """ if isinstance(out, list): self.outputs += out else: self.outputs.append(out) def set_run_after(self, task): """ Run this task only after the given *task*. Calling this method from :py:meth:`waflib.Task.Task.runnable_status` may cause build deadlocks; see :py:meth:`waflib.Tools.fc.fc.runnable_status` for details. :param task: task :type task: :py:class:`waflib.Task.Task` """ assert isinstance(task, Task) self.run_after.add(task) def signature(self): """ Task signatures are stored between build executions, they are use to track the changes made to the input nodes (not to the outputs!). The signature hashes data from various sources: * explicit dependencies: files listed in the inputs (list of node objects) :py:meth:`waflib.Task.Task.sig_explicit_deps` * implicit dependencies: list of nodes returned by scanner methods (when present) :py:meth:`waflib.Task.Task.sig_implicit_deps` * hashed data: variables/values read from task.vars/task.env :py:meth:`waflib.Task.Task.sig_vars` If the signature is expected to give a different result, clear the cache kept in ``self.cache_sig``:: from waflib import Task class cls(Task.Task): def signature(self): sig = super(Task.Task, self).signature() delattr(self, 'cache_sig') return super(Task.Task, self).signature() :return: the signature value :rtype: string or bytes """ try: return self.cache_sig except AttributeError: pass self.m = Utils.md5(self.hcode) # explicit deps self.sig_explicit_deps() # env vars self.sig_vars() # implicit deps / scanner results if self.scan: try: self.sig_implicit_deps() except Errors.TaskRescan: return self.signature() ret = self.cache_sig = self.m.digest() return ret def runnable_status(self): """ Returns the Task status :return: a task state in :py:const:`waflib.Task.RUN_ME`, :py:const:`waflib.Task.SKIP_ME`, :py:const:`waflib.Task.CANCEL_ME` or :py:const:`waflib.Task.ASK_LATER`. 
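A scheduler-style use of the return value (a sketch; ``tsk`` is an assumed
existing task instance)::

    status = tsk.runnable_status()
    if status == Task.RUN_ME:
        tsk.process()       # execute the task and record the result
    elif status == Task.ASK_LATER:
        pass                # dependencies are not ready yet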
:rtype: int """ bld = self.generator.bld if bld.is_install < 0: return SKIP_ME for t in self.run_after: if not t.hasrun: return ASK_LATER elif t.hasrun < SKIPPED: # a dependency has an error return CANCEL_ME # first compute the signature try: new_sig = self.signature() except Errors.TaskNotReady: return ASK_LATER # compare the signature to a signature computed previously key = self.uid() try: prev_sig = bld.task_sigs[key] except KeyError: Logs.debug('task: task %r must run: it was never run before or the task code changed', self) return RUN_ME if new_sig != prev_sig: Logs.debug('task: task %r must run: the task signature changed', self) return RUN_ME # compare the signatures of the outputs for node in self.outputs: sig = bld.node_sigs.get(node) if not sig: Logs.debug('task: task %r must run: an output node has no signature', self) return RUN_ME if sig != key: Logs.debug('task: task %r must run: an output node was produced by another task', self) return RUN_ME if not node.exists(): Logs.debug('task: task %r must run: an output node does not exist', self) return RUN_ME return (self.always_run and RUN_ME) or SKIP_ME def post_run(self): """ Called after successful execution to record that the task has run by updating the entry in :py:attr:`waflib.Build.BuildContext.task_sigs`. """ bld = self.generator.bld for node in self.outputs: if not node.exists(): self.hasrun = MISSING self.err_msg = '-> missing file: %r' % node.abspath() raise Errors.WafError(self.err_msg) bld.node_sigs[node] = self.uid() # make sure this task produced the files in question bld.task_sigs[self.uid()] = self.signature() if not self.keep_last_cmd: try: del self.last_cmd except AttributeError: pass def sig_explicit_deps(self): """ Used by :py:meth:`waflib.Task.Task.signature`; it hashes :py:attr:`waflib.Task.Task.inputs` and :py:attr:`waflib.Task.Task.dep_nodes` signatures. """ bld = self.generator.bld upd = self.m.update # the inputs for x in self.inputs + self.dep_nodes: upd(x.get_bld_sig()) # manual dependencies, they can slow down the builds if bld.deps_man: additional_deps = bld.deps_man for x in self.inputs + self.outputs: try: d = additional_deps[x] except KeyError: continue for v in d: try: v = v.get_bld_sig() except AttributeError: if hasattr(v, '__call__'): v = v() # dependency is a function, call it upd(v) def sig_deep_inputs(self): """ Enable rebuilds on input files task signatures. Not used by default. Example: hashes of output programs can be unchanged after being re-linked, despite the libraries being different. This method can thus prevent stale unit test results (waf_unit_test.py). Hashing input file timestamps is another possibility for the implementation. This may cause unnecessary rebuilds when input tasks are frequently executed. Here is an implementation example:: lst = [] for node in self.inputs + self.dep_nodes: st = os.stat(node.abspath()) lst.append(st.st_mtime) lst.append(st.st_size) self.m.update(Utils.h_list(lst)) The downside of the implementation is that it absolutely requires all build directory files to be declared within the current build. 
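The :py:func:`waflib.Task.deep_inputs` decorator defined later in this module
wires this method into a task class; a sketch (the class name and the
``TEST_RUNNER`` variable are illustrative)::

    from waflib import Task

    @Task.deep_inputs
    class run_tests(Task.Task):
        run_str = '${TEST_RUNNER} ${SRC}'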
""" bld = self.generator.bld lst = [bld.task_sigs[bld.node_sigs[node]] for node in (self.inputs + self.dep_nodes) if node.is_bld()] self.m.update(Utils.h_list(lst)) def sig_vars(self): """ Used by :py:meth:`waflib.Task.Task.signature`; it hashes :py:attr:`waflib.Task.Task.env` variables/values When overriding this method, and if scriptlet expressions are used, make sure to follow the code in :py:meth:`waflib.Task.Task.compile_sig_vars` to enable dependencies on scriptlet results. This method may be replaced on subclasses by the metaclass to force dependencies on scriptlet code. """ sig = self.generator.bld.hash_env_vars(self.env, self.vars) self.m.update(sig) scan = None """ This method, when provided, returns a tuple containing: * a list of nodes corresponding to real files * a list of names for files not found in path_lst For example:: from waflib.Task import Task class mytask(Task): def scan(self, node): return ([], []) The first and second lists in the tuple are stored in :py:attr:`waflib.Build.BuildContext.node_deps` and :py:attr:`waflib.Build.BuildContext.raw_deps` respectively. """ def sig_implicit_deps(self): """ Used by :py:meth:`waflib.Task.Task.signature`; it hashes node signatures obtained by scanning for dependencies (:py:meth:`waflib.Task.Task.scan`). The exception :py:class:`waflib.Errors.TaskRescan` is thrown when a file has changed. In this case, the method :py:meth:`waflib.Task.Task.signature` is called once again, and return here to call :py:meth:`waflib.Task.Task.scan` and searching for dependencies. """ bld = self.generator.bld # get the task signatures from previous runs key = self.uid() prev = bld.imp_sigs.get(key, []) # for issue #379 if prev: try: if prev == self.compute_sig_implicit_deps(): return prev except Errors.TaskNotReady: raise except EnvironmentError: # when a file was renamed, remove the stale nodes (headers in folders without source files) # this will break the order calculation for headers created during the build in the source directory (should be uncommon) # the behaviour will differ when top != out for x in bld.node_deps.get(self.uid(), []): if not x.is_bld() and not x.exists(): try: del x.parent.children[x.name] except KeyError: pass del bld.imp_sigs[key] raise Errors.TaskRescan('rescan') # no previous run or the signature of the dependencies has changed, rescan the dependencies (bld.node_deps[key], bld.raw_deps[key]) = self.scan() if Logs.verbose: Logs.debug('deps: scanner for %s: %r; unresolved: %r', self, bld.node_deps[key], bld.raw_deps[key]) # recompute the signature and return it try: bld.imp_sigs[key] = self.compute_sig_implicit_deps() except EnvironmentError: for k in bld.node_deps.get(self.uid(), []): if not k.exists(): Logs.warn('Dependency %r for %r is missing: check the task declaration and the build order!', k, self) raise def compute_sig_implicit_deps(self): """ Used by :py:meth:`waflib.Task.Task.sig_implicit_deps` for computing the actual hash of the :py:class:`waflib.Node.Node` returned by the scanner. 
:return: a hash value for the implicit dependencies :rtype: string or bytes """ upd = self.m.update self.are_implicit_nodes_ready() # scanner returns a node that does not have a signature # just *ignore* the error and let them figure out from the compiler output # waf -k behaviour for k in self.generator.bld.node_deps.get(self.uid(), []): upd(k.get_bld_sig()) return self.m.digest() def are_implicit_nodes_ready(self): """ For each node returned by the scanner, see if there is a task that creates it, and infer the build order This has a low performance impact on null builds (1.86s->1.66s) thanks to caching (28s->1.86s) """ bld = self.generator.bld try: cache = bld.dct_implicit_nodes except AttributeError: bld.dct_implicit_nodes = cache = {} # one cache per build group try: dct = cache[bld.current_group] except KeyError: dct = cache[bld.current_group] = {} for tsk in bld.cur_tasks: for x in tsk.outputs: dct[x] = tsk modified = False for x in bld.node_deps.get(self.uid(), []): if x in dct: self.run_after.add(dct[x]) modified = True if modified: for tsk in self.run_after: if not tsk.hasrun: #print "task is not ready..." raise Errors.TaskNotReady('not ready') if sys.hexversion > 0x3000000: def uid(self): try: return self.uid_ except AttributeError: m = Utils.md5(self.__class__.__name__.encode('latin-1', 'xmlcharrefreplace')) up = m.update for x in self.inputs + self.outputs: up(x.abspath().encode('latin-1', 'xmlcharrefreplace')) self.uid_ = m.digest() return self.uid_ uid.__doc__ = Task.uid.__doc__ Task.uid = uid def is_before(t1, t2): """ Returns a non-zero value if task t1 is to be executed before task t2:: t1.ext_out = '.h' t2.ext_in = '.h' t2.after = ['t1'] t1.before = ['t2'] waflib.Task.is_before(t1, t2) # True :param t1: Task object :type t1: :py:class:`waflib.Task.Task` :param t2: Task object :type t2: :py:class:`waflib.Task.Task` """ to_list = Utils.to_list for k in to_list(t2.ext_in): if k in to_list(t1.ext_out): return 1 if t1.__class__.__name__ in to_list(t2.after): return 1 if t2.__class__.__name__ in to_list(t1.before): return 1 return 0 def set_file_constraints(tasks): """ Updates the ``run_after`` attribute of all tasks based on the task inputs and outputs :param tasks: tasks :type tasks: list of :py:class:`waflib.Task.Task` """ ins = Utils.defaultdict(set) outs = Utils.defaultdict(set) for x in tasks: for a in x.inputs: ins[a].add(x) for a in x.dep_nodes: ins[a].add(x) for a in x.outputs: outs[a].add(x) links = set(ins.keys()).intersection(outs.keys()) for k in links: for a in ins[k]: a.run_after.update(outs[k]) class TaskGroup(object): """ Wrap nxm task order constraints into a single object to prevent the creation of large list/set objects This is an optimization """ def __init__(self, prev, next): self.prev = prev self.next = next self.done = False def get_hasrun(self): for k in self.prev: if not k.hasrun: return NOT_RUN return SUCCESS hasrun = property(get_hasrun, None) def set_precedence_constraints(tasks): """ Updates the ``run_after`` attribute of all tasks based on the after/before/ext_out/ext_in attributes :param tasks: tasks :type tasks: list of :py:class:`waflib.Task.Task` """ cstr_groups = Utils.defaultdict(list) for x in tasks: h = x.hash_constraints() cstr_groups[h].append(x) keys = list(cstr_groups.keys()) maxi = len(keys) # this list should be short for i in range(maxi): t1 = cstr_groups[keys[i]][0] for j in range(i + 1, maxi): t2 = cstr_groups[keys[j]][0] # add the constraints based on the comparisons if is_before(t1, t2): a = i b = j elif is_before(t2, t1): a = 
j b = i else: continue a = cstr_groups[keys[a]] b = cstr_groups[keys[b]] if len(a) < 2 or len(b) < 2: for x in b: x.run_after.update(a) else: group = TaskGroup(set(a), set(b)) for x in b: x.run_after.add(group) def funex(c): """ Compiles a scriptlet expression into a Python function :param c: function to compile :type c: string :return: the function 'f' declared in the input string :rtype: function """ dc = {} exec(c, dc) return dc['f'] re_cond = re.compile(r'(?P\w+)|(?P\|)|(?P&)') re_novar = re.compile(r'^(SRC|TGT)\W+.*?$') reg_act = re.compile(r'(?P\\)|(?P\$\$)|(?P\$\{(?P\w+)(?P.*?)\})', re.M) def compile_fun_shell(line): """ Creates a compiled function to execute a process through a sub-shell """ extr = [] def repl(match): g = match.group if g('dollar'): return "$" elif g('backslash'): return '\\\\' elif g('subst'): extr.append((g('var'), g('code'))) return "%s" return None line = reg_act.sub(repl, line) or line dvars = [] def add_dvar(x): if x not in dvars: dvars.append(x) def replc(m): # performs substitutions and populates dvars if m.group('and'): return ' and ' elif m.group('or'): return ' or ' else: x = m.group('var') add_dvar(x) return 'env[%r]' % x parm = [] app = parm.append for (var, meth) in extr: if var == 'SRC': if meth: app('tsk.inputs%s' % meth) else: app('" ".join([a.path_from(cwdx) for a in tsk.inputs])') elif var == 'TGT': if meth: app('tsk.outputs%s' % meth) else: app('" ".join([a.path_from(cwdx) for a in tsk.outputs])') elif meth: if meth.startswith(':'): add_dvar(var) m = meth[1:] if m == 'SRC': m = '[a.path_from(cwdx) for a in tsk.inputs]' elif m == 'TGT': m = '[a.path_from(cwdx) for a in tsk.outputs]' elif re_novar.match(m): m = '[tsk.inputs%s]' % m[3:] elif re_novar.match(m): m = '[tsk.outputs%s]' % m[3:] else: add_dvar(m) if m[:3] not in ('tsk', 'gen', 'bld'): m = '%r' % m app('" ".join(tsk.colon(%r, %s))' % (var, m)) elif meth.startswith('?'): # In A?B|C output env.A if one of env.B or env.C is non-empty expr = re_cond.sub(replc, meth[1:]) app('p(%r) if (%s) else ""' % (var, expr)) else: call = '%s%s' % (var, meth) add_dvar(call) app(call) else: add_dvar(var) app("p('%s')" % var) if parm: parm = "%% (%s) " % (',\n\t\t'.join(parm)) else: parm = '' c = COMPILE_TEMPLATE_SHELL % (line, parm) Logs.debug('action: %s', c.strip().splitlines()) return (funex(c), dvars) reg_act_noshell = re.compile(r"(?P\s+)|(?P\$\{(?P\w+)(?P.*?)\})|(?P([^$ \t\n\r\f\v]|\$\$)+)", re.M) def compile_fun_noshell(line): """ Creates a compiled function to execute a process without a sub-shell """ buf = [] dvars = [] merge = False app = buf.append def add_dvar(x): if x not in dvars: dvars.append(x) def replc(m): # performs substitutions and populates dvars if m.group('and'): return ' and ' elif m.group('or'): return ' or ' else: x = m.group('var') add_dvar(x) return 'env[%r]' % x for m in reg_act_noshell.finditer(line): if m.group('space'): merge = False continue elif m.group('text'): app('[%r]' % m.group('text').replace('$$', '$')) elif m.group('subst'): var = m.group('var') code = m.group('code') if var == 'SRC': if code: app('[tsk.inputs%s]' % code) else: app('[a.path_from(cwdx) for a in tsk.inputs]') elif var == 'TGT': if code: app('[tsk.outputs%s]' % code) else: app('[a.path_from(cwdx) for a in tsk.outputs]') elif code: if code.startswith(':'): # a composed variable ${FOO:OUT} add_dvar(var) m = code[1:] if m == 'SRC': m = '[a.path_from(cwdx) for a in tsk.inputs]' elif m == 'TGT': m = '[a.path_from(cwdx) for a in tsk.outputs]' elif re_novar.match(m): m = '[tsk.inputs%s]' % m[3:] elif 
re_novar.match(m): m = '[tsk.outputs%s]' % m[3:] else: add_dvar(m) if m[:3] not in ('tsk', 'gen', 'bld'): m = '%r' % m app('tsk.colon(%r, %s)' % (var, m)) elif code.startswith('?'): # In A?B|C output env.A if one of env.B or env.C is non-empty expr = re_cond.sub(replc, code[1:]) app('to_list(env[%r] if (%s) else [])' % (var, expr)) else: # plain code such as ${tsk.inputs[0].abspath()} call = '%s%s' % (var, code) add_dvar(call) app('to_list(%s)' % call) else: # a plain variable such as # a plain variable like ${AR} app('to_list(env[%r])' % var) add_dvar(var) if merge: tmp = 'merge(%s, %s)' % (buf[-2], buf[-1]) del buf[-1] buf[-1] = tmp merge = True # next turn buf = ['lst.extend(%s)' % x for x in buf] fun = COMPILE_TEMPLATE_NOSHELL % "\n\t".join(buf) Logs.debug('action: %s', fun.strip().splitlines()) return (funex(fun), dvars) def compile_fun(line, shell=False): """ Parses a string expression such as '${CC} ${SRC} -o ${TGT}' and returns a pair containing: * The function created (compiled) for use as :py:meth:`waflib.Task.Task.run` * The list of variables that must cause rebuilds when *env* data is modified for example:: from waflib.Task import compile_fun compile_fun('cxx', '${CXX} -o ${TGT[0]} ${SRC} -I ${SRC[0].parent.bldpath()}') def build(bld): bld(source='wscript', rule='echo "foo\\${SRC[0].name}\\bar"') The env variables (CXX, ..) on the task must not hold dicts so as to preserve a consistent order. The reserved keywords ``TGT`` and ``SRC`` represent the task input and output nodes """ if isinstance(line, str): if line.find('<') > 0 or line.find('>') > 0 or line.find('&&') > 0: shell = True else: dvars_lst = [] funs_lst = [] for x in line: if isinstance(x, str): fun, dvars = compile_fun(x, shell) dvars_lst += dvars funs_lst.append(fun) else: # assume a function to let through funs_lst.append(x) def composed_fun(task): for x in funs_lst: ret = x(task) if ret: return ret return None return composed_fun, dvars_lst if shell: return compile_fun_shell(line) else: return compile_fun_noshell(line) def compile_sig_vars(vars): """ This method produces a sig_vars method suitable for subclasses that provide scriptlet code in their run_str code. If no such method can be created, this method returns None. The purpose of the sig_vars method returned is to ensures that rebuilds occur whenever the contents of the expression changes. This is the case B below:: import time # case A: regular variables tg = bld(rule='echo ${FOO}') tg.env.FOO = '%s' % time.time() # case B bld(rule='echo ${gen.foo}', foo='%s' % time.time()) :param vars: env variables such as CXXFLAGS or gen.foo :type vars: list of string :return: A sig_vars method relevant for dependencies if adequate, else None :rtype: A function, or None in most cases """ buf = [] for x in sorted(vars): if x[:3] in ('tsk', 'gen', 'bld'): buf.append('buf.append(%s)' % x) if buf: return funex(COMPILE_TEMPLATE_SIG_VARS % '\n\t'.join(buf)) return None def task_factory(name, func=None, vars=None, color='GREEN', ext_in=[], ext_out=[], before=[], after=[], shell=False, scan=None): """ Returns a new task subclass with the function ``run`` compiled from the line given. 
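A hedged example (the ``copy`` class name and the ``CP`` variable are
illustrative)::

    from waflib import Task
    cls = Task.task_factory('copy', '${CP} ${SRC} ${TGT}', color='BLUE', shell=False)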
:param func: method run :type func: string or function :param vars: list of variables to hash :type vars: list of string :param color: color to use :type color: string :param shell: when *func* is a string, enable/disable the use of the shell :type shell: bool :param scan: method scan :type scan: function :rtype: :py:class:`waflib.Task.Task` """ params = { 'vars': vars or [], # function arguments are static, and this one may be modified by the class 'color': color, 'name': name, 'shell': shell, 'scan': scan, } if isinstance(func, str) or isinstance(func, tuple): params['run_str'] = func else: params['run'] = func cls = type(Task)(name, (Task,), params) classes[name] = cls if ext_in: cls.ext_in = Utils.to_list(ext_in) if ext_out: cls.ext_out = Utils.to_list(ext_out) if before: cls.before = Utils.to_list(before) if after: cls.after = Utils.to_list(after) return cls def deep_inputs(cls): """ Task class decorator to enable rebuilds on input files task signatures """ def sig_explicit_deps(self): Task.sig_explicit_deps(self) Task.sig_deep_inputs(self) cls.sig_explicit_deps = sig_explicit_deps return cls TaskBase = Task "Provided for compatibility reasons, TaskBase should not be used" class TaskSemaphore(object): """ Task semaphores provide a simple and efficient way of throttling the amount of a particular task to run concurrently. The throttling value is capped by the amount of maximum jobs, so for example, a `TaskSemaphore(10)` has no effect in a `-j2` build. Task semaphores are typically specified on the task class level:: class compile(waflib.Task.Task): semaphore = waflib.Task.TaskSemaphore(2) run_str = 'touch ${TGT}' Task semaphores are meant to be used by the build scheduler in the main thread, so there are no guarantees of thread safety. """ def __init__(self, num): """ :param num: maximum value of concurrent tasks :type num: int """ self.num = num self.locking = set() self.waiting = set() def is_locked(self): """Returns True if this semaphore cannot be acquired by more tasks""" return len(self.locking) >= self.num def acquire(self, tsk): """ Mark the semaphore as used by the given task (not re-entrant). :param tsk: task object :type tsk: :py:class:`waflib.Task.Task` :raises: :py:class:`IndexError` in case the resource is already acquired """ if self.is_locked(): raise IndexError('Cannot lock more %r' % self.locking) self.locking.add(tsk) def release(self, tsk): """ Mark the semaphore as unused by the given task. :param tsk: task object :type tsk: :py:class:`waflib.Task.Task` :raises: :py:class:`KeyError` in case the resource is not acquired by the task """ self.locking.remove(tsk) tdb-1.4.2/third_party/waf/waflib/TaskGen.py0000660000000000000000000006407613527011455020523 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ Task generators The class :py:class:`waflib.TaskGen.task_gen` encapsulates the creation of task objects (low-level code) The instances can have various parameters, but the creation of task nodes (Task.py) is deferred. To achieve this, various methods are called from the method "apply" """ import copy, re, os, functools from waflib import Task, Utils, Logs, Errors, ConfigSet, Node feats = Utils.defaultdict(set) """remember the methods declaring features""" HEADER_EXTS = ['.h', '.hpp', '.hxx', '.hh'] class task_gen(object): """ Instances of this class create :py:class:`waflib.Task.Task` when calling the method :py:meth:`waflib.TaskGen.task_gen.post` from the main thread. 
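In a user script this class is rarely instantiated directly; calling the build
context creates the instances (a sketch, file names illustrative)::

    def build(bld):
        bld(features='c cprogram', source='main.c', target='app')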
A few notes: * The methods to call (*self.meths*) can be specified dynamically (removing, adding, ..) * The 'features' are used to add methods to self.meths and then execute them * The attribute 'path' is a node representing the location of the task generator * The tasks created are added to the attribute *tasks* * The attribute 'idx' is a counter of task generators in the same path """ mappings = Utils.ordered_iter_dict() """Mappings are global file extension mappings that are retrieved in the order of definition""" prec = Utils.defaultdict(set) """Dict that holds the precedence execution rules for task generator methods""" def __init__(self, *k, **kw): """ Task generator objects predefine various attributes (source, target) for possible processing by process_rule (make-like rules) or process_source (extensions, misc methods) Tasks are stored on the attribute 'tasks'. They are created by calling methods listed in ``self.meths`` or referenced in the attribute ``features`` A topological sort is performed to execute the methods in correct order. The extra key/value elements passed in ``kw`` are set as attributes """ self.source = [] self.target = '' self.meths = [] """ List of method names to execute (internal) """ self.features = [] """ List of feature names for bringing new methods in """ self.tasks = [] """ Tasks created are added to this list """ if not 'bld' in kw: # task generators without a build context :-/ self.env = ConfigSet.ConfigSet() self.idx = 0 self.path = None else: self.bld = kw['bld'] self.env = self.bld.env.derive() self.path = kw.get('path', self.bld.path) # by default, emulate chdir when reading scripts # Provide a unique index per folder # This is part of a measure to prevent output file name collisions path = self.path.abspath() try: self.idx = self.bld.idx[path] = self.bld.idx.get(path, 0) + 1 except AttributeError: self.bld.idx = {} self.idx = self.bld.idx[path] = 1 # Record the global task generator count try: self.tg_idx_count = self.bld.tg_idx_count = self.bld.tg_idx_count + 1 except AttributeError: self.tg_idx_count = self.bld.tg_idx_count = 1 for key, val in kw.items(): setattr(self, key, val) def __str__(self): """Debugging helper""" return "" % (self.name, self.path.abspath()) def __repr__(self): """Debugging helper""" lst = [] for x in self.__dict__: if x not in ('env', 'bld', 'compiled_tasks', 'tasks'): lst.append("%s=%s" % (x, repr(getattr(self, x)))) return "bld(%s) in %s" % (", ".join(lst), self.path.abspath()) def get_cwd(self): """ Current working directory for the task generator, defaults to the build directory. This is still used in a few places but it should disappear at some point as the classes define their own working directory. 
:rtype: :py:class:`waflib.Node.Node` """ return self.bld.bldnode def get_name(self): """ If the attribute ``name`` is not set on the instance, the name is computed from the target name:: def build(bld): x = bld(name='foo') x.get_name() # foo y = bld(target='bar') y.get_name() # bar :rtype: string :return: name of this task generator """ try: return self._name except AttributeError: if isinstance(self.target, list): lst = [str(x) for x in self.target] name = self._name = ','.join(lst) else: name = self._name = str(self.target) return name def set_name(self, name): self._name = name name = property(get_name, set_name) def to_list(self, val): """ Ensures that a parameter is a list, see :py:func:`waflib.Utils.to_list` :type val: string or list of string :param val: input to return as a list :rtype: list """ if isinstance(val, str): return val.split() else: return val def post(self): """ Creates tasks for this task generators. The following operations are performed: #. The body of this method is called only once and sets the attribute ``posted`` #. The attribute ``features`` is used to add more methods in ``self.meths`` #. The methods are sorted by the precedence table ``self.prec`` or `:waflib:attr:waflib.TaskGen.task_gen.prec` #. The methods are then executed in order #. The tasks created are added to :py:attr:`waflib.TaskGen.task_gen.tasks` """ if getattr(self, 'posted', None): return False self.posted = True keys = set(self.meths) keys.update(feats['*']) # add the methods listed in the features self.features = Utils.to_list(self.features) for x in self.features: st = feats[x] if st: keys.update(st) elif not x in Task.classes: Logs.warn('feature %r does not exist - bind at least one method to it?', x) # copy the precedence table prec = {} prec_tbl = self.prec for x in prec_tbl: if x in keys: prec[x] = prec_tbl[x] # elements disconnected tmp = [] for a in keys: for x in prec.values(): if a in x: break else: tmp.append(a) tmp.sort(reverse=True) # topological sort out = [] while tmp: e = tmp.pop() if e in keys: out.append(e) try: nlst = prec[e] except KeyError: pass else: del prec[e] for x in nlst: for y in prec: if x in prec[y]: break else: tmp.append(x) tmp.sort(reverse=True) if prec: buf = ['Cycle detected in the method execution:'] for k, v in prec.items(): buf.append('- %s after %s' % (k, [x for x in v if x in prec])) raise Errors.WafError('\n'.join(buf)) self.meths = out # then we run the methods in order Logs.debug('task_gen: posting %s %d', self, id(self)) for x in out: try: v = getattr(self, x) except AttributeError: raise Errors.WafError('%r is not a valid task generator method' % x) Logs.debug('task_gen: -> %s (%d)', x, id(self)) v() Logs.debug('task_gen: posted %s', self.name) return True def get_hook(self, node): """ Returns the ``@extension`` method to call for a Node of a particular extension. :param node: Input file to process :type node: :py:class:`waflib.Tools.Node.Node` :return: A method able to process the input node by looking at the extension :rtype: function """ name = node.name for k in self.mappings: try: if name.endswith(k): return self.mappings[k] except TypeError: # regexps objects if k.match(name): return self.mappings[k] keys = list(self.mappings.keys()) raise Errors.WafError("File %r has no mapping in %r (load a waf tool?)" % (node, keys)) def create_task(self, name, src=None, tgt=None, **kw): """ Creates task instances. 
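Typical use from a task generator method (a sketch; ``node`` is an assumed
input node)::

    tsk = self.create_task('c', node, node.change_ext('.o'))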
:param name: task class name :type name: string :param src: input nodes :type src: list of :py:class:`waflib.Tools.Node.Node` :param tgt: output nodes :type tgt: list of :py:class:`waflib.Tools.Node.Node` :return: A task object :rtype: :py:class:`waflib.Task.Task` """ task = Task.classes[name](env=self.env.derive(), generator=self) if src: task.set_inputs(src) if tgt: task.set_outputs(tgt) task.__dict__.update(kw) self.tasks.append(task) return task def clone(self, env): """ Makes a copy of a task generator. Once the copy is made, it is necessary to ensure that the it does not create the same output files as the original, or the same files may be compiled several times. :param env: A configuration set :type env: :py:class:`waflib.ConfigSet.ConfigSet` :return: A copy :rtype: :py:class:`waflib.TaskGen.task_gen` """ newobj = self.bld() for x in self.__dict__: if x in ('env', 'bld'): continue elif x in ('path', 'features'): setattr(newobj, x, getattr(self, x)) else: setattr(newobj, x, copy.copy(getattr(self, x))) newobj.posted = False if isinstance(env, str): newobj.env = self.bld.all_envs[env].derive() else: newobj.env = env.derive() return newobj def declare_chain(name='', rule=None, reentrant=None, color='BLUE', ext_in=[], ext_out=[], before=[], after=[], decider=None, scan=None, install_path=None, shell=False): """ Creates a new mapping and a task class for processing files by extension. See Tools/flex.py for an example. :param name: name for the task class :type name: string :param rule: function to execute or string to be compiled in a function :type rule: string or function :param reentrant: re-inject the output file in the process (done automatically, set to 0 to disable) :type reentrant: int :param color: color for the task output :type color: string :param ext_in: execute the task only after the files of such extensions are created :type ext_in: list of string :param ext_out: execute the task only before files of such extensions are processed :type ext_out: list of string :param before: execute instances of this task before classes of the given names :type before: list of string :param after: execute instances of this task after classes of the given names :type after: list of string :param decider: if present, function that returns a list of output file extensions (overrides ext_out for output files, but not for the build order) :type decider: function :param scan: scanner function for the task :type scan: function :param install_path: installation path for the output nodes :type install_path: string """ ext_in = Utils.to_list(ext_in) ext_out = Utils.to_list(ext_out) if not name: name = rule cls = Task.task_factory(name, rule, color=color, ext_in=ext_in, ext_out=ext_out, before=before, after=after, scan=scan, shell=shell) def x_file(self, node): if ext_in: _ext_in = ext_in[0] tsk = self.create_task(name, node) cnt = 0 ext = decider(self, node) if decider else cls.ext_out for x in ext: k = node.change_ext(x, ext_in=_ext_in) tsk.outputs.append(k) if reentrant != None: if cnt < int(reentrant): self.source.append(k) else: # reinject downstream files into the build for y in self.mappings: # ~ nfile * nextensions :-/ if k.name.endswith(y): self.source.append(k) break cnt += 1 if install_path: self.install_task = self.add_install_files(install_to=install_path, install_from=tsk.outputs) return tsk for x in cls.ext_in: task_gen.mappings[x] = x_file return x_file def taskgen_method(func): """ Decorator that registers method as a task generator method. 
The function must accept a task generator as first parameter:: from waflib.TaskGen import taskgen_method @taskgen_method def mymethod(self): pass :param func: task generator method to add :type func: function :rtype: function """ setattr(task_gen, func.__name__, func) return func def feature(*k): """ Decorator that registers a task generator method that will be executed when the object attribute ``feature`` contains the corresponding key(s):: from waflib.Task import feature @feature('myfeature') def myfunction(self): print('that is my feature!') def build(bld): bld(features='myfeature') :param k: feature names :type k: list of string """ def deco(func): setattr(task_gen, func.__name__, func) for name in k: feats[name].update([func.__name__]) return func return deco def before_method(*k): """ Decorator that registera task generator method which will be executed before the functions of given name(s):: from waflib.TaskGen import feature, before @feature('myfeature') @before_method('fun2') def fun1(self): print('feature 1!') @feature('myfeature') def fun2(self): print('feature 2!') def build(bld): bld(features='myfeature') :param k: method names :type k: list of string """ def deco(func): setattr(task_gen, func.__name__, func) for fun_name in k: task_gen.prec[func.__name__].add(fun_name) return func return deco before = before_method def after_method(*k): """ Decorator that registers a task generator method which will be executed after the functions of given name(s):: from waflib.TaskGen import feature, after @feature('myfeature') @after_method('fun2') def fun1(self): print('feature 1!') @feature('myfeature') def fun2(self): print('feature 2!') def build(bld): bld(features='myfeature') :param k: method names :type k: list of string """ def deco(func): setattr(task_gen, func.__name__, func) for fun_name in k: task_gen.prec[fun_name].add(func.__name__) return func return deco after = after_method def extension(*k): """ Decorator that registers a task generator method which will be invoked during the processing of source files for the extension given:: from waflib import Task class mytask(Task): run_str = 'cp ${SRC} ${TGT}' @extension('.moo') def create_maa_file(self, node): self.create_task('mytask', node, node.change_ext('.maa')) def build(bld): bld(source='foo.moo') """ def deco(func): setattr(task_gen, func.__name__, func) for x in k: task_gen.mappings[x] = func return func return deco @taskgen_method def to_nodes(self, lst, path=None): """ Flatten the input list of string/nodes/lists into a list of nodes. It is used by :py:func:`waflib.TaskGen.process_source` and :py:func:`waflib.TaskGen.process_rule`. It is designed for source files, for folders, see :py:func:`waflib.Tools.ccroot.to_incnodes`: :param lst: input list :type lst: list of string and nodes :param path: path from which to search the nodes (by default, :py:attr:`waflib.TaskGen.task_gen.path`) :type path: :py:class:`waflib.Tools.Node.Node` :rtype: list of :py:class:`waflib.Tools.Node.Node` """ tmp = [] path = path or self.path find = path.find_resource if isinstance(lst, Node.Node): lst = [lst] for x in Utils.to_list(lst): if isinstance(x, str): node = find(x) elif hasattr(x, 'name'): node = x else: tmp.extend(self.to_nodes(x)) continue if not node: raise Errors.WafError('source not found: %r in %r' % (x, self)) tmp.append(node) return tmp @feature('*') def process_source(self): """ Processes each element in the attribute ``source`` by extension. #. 
The *source* list is converted through :py:meth:`waflib.TaskGen.to_nodes` to a list of :py:class:`waflib.Node.Node` first. #. File extensions are mapped to methods having the signature: ``def meth(self, node)`` by :py:meth:`waflib.TaskGen.extension` #. The method is retrieved through :py:meth:`waflib.TaskGen.task_gen.get_hook` #. When called, the methods may modify self.source to append more source to process #. The mappings can map an extension or a filename (see the code below) """ self.source = self.to_nodes(getattr(self, 'source', [])) for node in self.source: self.get_hook(node)(self, node) @feature('*') @before_method('process_source') def process_rule(self): """ Processes the attribute ``rule``. When present, :py:meth:`waflib.TaskGen.process_source` is disabled:: def build(bld): bld(rule='cp ${SRC} ${TGT}', source='wscript', target='bar.txt') Main attributes processed: * rule: command to execute, it can be a tuple of strings for multiple commands * chmod: permissions for the resulting files (integer value such as Utils.O755) * shell: set to False to execute the command directly (default is True to use a shell) * scan: scanner function * vars: list of variables to trigger rebuilds, such as CFLAGS * cls_str: string to display when executing the task * cls_keyword: label to display when executing the task * cache_rule: by default, try to re-use similar classes, set to False to disable * source: list of Node or string objects representing the source files required by this task * target: list of Node or string objects representing the files that this task creates * cwd: current working directory (Node or string) * stdout: standard output, set to None to prevent waf from capturing the text * stderr: standard error, set to None to prevent waf from capturing the text * timeout: timeout for command execution (Python 3) * always: whether to always run the command (False by default) * deep_inputs: whether the task must depend on the input file tasks too (False by default) """ if not getattr(self, 'rule', None): return # create the task class name = str(getattr(self, 'name', None) or self.target or getattr(self.rule, '__name__', self.rule)) # or we can put the class in a cache for performance reasons try: cache = self.bld.cache_rule_attr except AttributeError: cache = self.bld.cache_rule_attr = {} chmod = getattr(self, 'chmod', None) shell = getattr(self, 'shell', True) color = getattr(self, 'color', 'BLUE') scan = getattr(self, 'scan', None) _vars = getattr(self, 'vars', []) cls_str = getattr(self, 'cls_str', None) cls_keyword = getattr(self, 'cls_keyword', None) use_cache = getattr(self, 'cache_rule', 'True') deep_inputs = getattr(self, 'deep_inputs', False) scan_val = has_deps = hasattr(self, 'deps') if scan: scan_val = id(scan) key = Utils.h_list((name, self.rule, chmod, shell, color, cls_str, cls_keyword, scan_val, _vars, deep_inputs)) cls = None if use_cache: try: cls = cache[key] except KeyError: pass if not cls: rule = self.rule if chmod is not None: def chmod_fun(tsk): for x in tsk.outputs: os.chmod(x.abspath(), tsk.generator.chmod) if isinstance(rule, tuple): rule = list(rule) rule.append(chmod_fun) rule = tuple(rule) else: rule = (rule, chmod_fun) cls = Task.task_factory(name, rule, _vars, shell=shell, color=color) if cls_str: setattr(cls, '__str__', self.cls_str) if cls_keyword: setattr(cls, 'keyword', self.cls_keyword) if deep_inputs: Task.deep_inputs(cls) if scan: cls.scan = self.scan elif has_deps: def scan(self): nodes = [] for x in self.generator.to_list(getattr(self.generator, 
'deps', None)): node = self.generator.path.find_resource(x) if not node: self.generator.bld.fatal('Could not find %r (was it declared?)' % x) nodes.append(node) return [nodes, []] cls.scan = scan if use_cache: cache[key] = cls # now create one instance tsk = self.create_task(name) for x in ('after', 'before', 'ext_in', 'ext_out'): setattr(tsk, x, getattr(self, x, [])) if hasattr(self, 'stdout'): tsk.stdout = self.stdout if hasattr(self, 'stderr'): tsk.stderr = self.stderr if getattr(self, 'timeout', None): tsk.timeout = self.timeout if getattr(self, 'always', None): tsk.always_run = True if getattr(self, 'target', None): if isinstance(self.target, str): self.target = self.target.split() if not isinstance(self.target, list): self.target = [self.target] for x in self.target: if isinstance(x, str): tsk.outputs.append(self.path.find_or_declare(x)) else: x.parent.mkdir() # if a node was given, create the required folders tsk.outputs.append(x) if getattr(self, 'install_path', None): self.install_task = self.add_install_files(install_to=self.install_path, install_from=tsk.outputs, chmod=getattr(self, 'chmod', Utils.O644)) if getattr(self, 'source', None): tsk.inputs = self.to_nodes(self.source) # bypass the execution of process_source by setting the source to an empty list self.source = [] if getattr(self, 'cwd', None): tsk.cwd = self.cwd if isinstance(tsk.run, functools.partial): # Python documentation says: "partial objects defined in classes # behave like static methods and do not transform into bound # methods during instance attribute look-up." tsk.run = functools.partial(tsk.run, tsk) @feature('seq') def sequence_order(self): """ Adds a strict sequential constraint between the tasks generated by task generators. It works because task generators are posted in order. It will not post objects which belong to other folders. Example:: bld(features='javac seq') bld(features='jar seq') To start a new sequence, set the attribute seq_start, for example:: obj = bld(features='seq') obj.seq_start = True Note that the method is executed in last position. This is more an example than a widely-used solution. """ if self.meths and self.meths[-1] != 'sequence_order': self.meths.append('sequence_order') return if getattr(self, 'seq_start', None): return # all the tasks previously declared must be run before these if getattr(self.bld, 'prev', None): self.bld.prev.post() for x in self.bld.prev.tasks: for y in self.tasks: y.set_run_after(x) self.bld.prev = self re_m4 = re.compile(r'@(\w+)@', re.M) class subst_pc(Task.Task): """ Creates *.pc* files from *.pc.in*. The task is executed whenever an input variable used in the substitution changes. 
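The ``.pc.in`` extension hook below creates these tasks automatically; a
wscript sketch (the file name is illustrative)::

    def build(bld):
        bld(source='libfoo.pc.in')  # the result is installed under the pkgconfig directory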
""" def force_permissions(self): "Private for the time being, we will probably refactor this into run_str=[run1,chmod]" if getattr(self.generator, 'chmod', None): for x in self.outputs: os.chmod(x.abspath(), self.generator.chmod) def run(self): "Substitutes variables in a .in file" if getattr(self.generator, 'is_copy', None): for i, x in enumerate(self.outputs): x.write(self.inputs[i].read('rb'), 'wb') stat = os.stat(self.inputs[i].abspath()) # Preserve mtime of the copy os.utime(self.outputs[i].abspath(), (stat.st_atime, stat.st_mtime)) self.force_permissions() return None if getattr(self.generator, 'fun', None): ret = self.generator.fun(self) if not ret: self.force_permissions() return ret code = self.inputs[0].read(encoding=getattr(self.generator, 'encoding', 'latin-1')) if getattr(self.generator, 'subst_fun', None): code = self.generator.subst_fun(self, code) if code is not None: self.outputs[0].write(code, encoding=getattr(self.generator, 'encoding', 'latin-1')) self.force_permissions() return None # replace all % by %% to prevent errors by % signs code = code.replace('%', '%%') # extract the vars foo into lst and replace @foo@ by %(foo)s lst = [] def repl(match): g = match.group if g(1): lst.append(g(1)) return "%%(%s)s" % g(1) return '' code = getattr(self.generator, 're_m4', re_m4).sub(repl, code) try: d = self.generator.dct except AttributeError: d = {} for x in lst: tmp = getattr(self.generator, x, '') or self.env[x] or self.env[x.upper()] try: tmp = ''.join(tmp) except TypeError: tmp = str(tmp) d[x] = tmp code = code % d self.outputs[0].write(code, encoding=getattr(self.generator, 'encoding', 'latin-1')) self.generator.bld.raw_deps[self.uid()] = lst # make sure the signature is updated try: delattr(self, 'cache_sig') except AttributeError: pass self.force_permissions() def sig_vars(self): """ Compute a hash (signature) of the variables used in the substitution """ bld = self.generator.bld env = self.env upd = self.m.update if getattr(self.generator, 'fun', None): upd(Utils.h_fun(self.generator.fun).encode()) if getattr(self.generator, 'subst_fun', None): upd(Utils.h_fun(self.generator.subst_fun).encode()) # raw_deps: persistent custom values returned by the scanner vars = self.generator.bld.raw_deps.get(self.uid(), []) # hash both env vars and task generator attributes act_sig = bld.hash_env_vars(env, vars) upd(act_sig) lst = [getattr(self.generator, x, '') for x in vars] upd(Utils.h_list(lst)) return self.m.digest() @extension('.pc.in') def add_pcfile(self, node): """ Processes *.pc.in* files to *.pc*. Installs the results to ``${PREFIX}/lib/pkgconfig/`` by default def build(bld): bld(source='foo.pc.in', install_path='${LIBDIR}/pkgconfig/') """ tsk = self.create_task('subst_pc', node, node.change_ext('.pc', '.pc.in')) self.install_task = self.add_install_files( install_to=getattr(self, 'install_path', '${LIBDIR}/pkgconfig/'), install_from=tsk.outputs) class subst(subst_pc): pass @feature('subst') @before_method('process_source', 'process_rule') def process_subst(self): """ Defines a transformation that substitutes the contents of *source* files to *target* files:: def build(bld): bld( features='subst', source='foo.c.in', target='foo.c', install_path='${LIBDIR}/pkgconfig', VAR = 'val' ) The input files are supposed to contain macros of the form *@VAR@*, where *VAR* is an argument of the task generator object. This method overrides the processing by :py:meth:`waflib.TaskGen.process_source`. 
""" src = Utils.to_list(getattr(self, 'source', [])) if isinstance(src, Node.Node): src = [src] tgt = Utils.to_list(getattr(self, 'target', [])) if isinstance(tgt, Node.Node): tgt = [tgt] if len(src) != len(tgt): raise Errors.WafError('invalid number of source/target for %r' % self) for x, y in zip(src, tgt): if not x or not y: raise Errors.WafError('null source or target for %r' % self) a, b = None, None if isinstance(x, str) and isinstance(y, str) and x == y: a = self.path.find_node(x) b = self.path.get_bld().make_node(y) if not os.path.isfile(b.abspath()): b.parent.mkdir() else: if isinstance(x, str): a = self.path.find_resource(x) elif isinstance(x, Node.Node): a = x if isinstance(y, str): b = self.path.find_or_declare(y) elif isinstance(y, Node.Node): b = y if not a: raise Errors.WafError('could not find %r for %r' % (x, self)) tsk = self.create_task('subst', a, b) for k in ('after', 'before', 'ext_in', 'ext_out'): val = getattr(self, k, None) if val: setattr(tsk, k, val) # paranoid safety measure for the general case foo.in->foo.h with ambiguous dependencies for xt in HEADER_EXTS: if b.name.endswith(xt): tsk.ext_in = tsk.ext_in + ['.h'] break inst_to = getattr(self, 'install_path', None) if inst_to: self.install_task = self.add_install_files(install_to=inst_to, install_from=b, chmod=getattr(self, 'chmod', Utils.O644)) self.source = [] tdb-1.4.2/third_party/waf/waflib/Tools/__init__.py0000660000000000000000000000010713444661622022014 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) tdb-1.4.2/third_party/waf/waflib/Tools/ar.py0000660000000000000000000000117213444661622020662 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) # Ralf Habacker, 2006 (rh) """ The **ar** program creates static libraries. This tool is almost always loaded from others (C, C++, D, etc) for static library support. """ from waflib.Configure import conf @conf def find_ar(conf): """Configuration helper used by C/C++ tools to enable the support for static libraries""" conf.load('ar') def configure(conf): """Finds the ar program and sets the default flags in ``conf.env.ARFLAGS``""" conf.find_program('ar', var='AR') conf.add_os_flags('ARFLAGS') if not conf.env.ARFLAGS: conf.env.ARFLAGS = ['rcs'] tdb-1.4.2/third_party/waf/waflib/Tools/asm.py0000660000000000000000000000315613444661622021044 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2008-2018 (ita) """ Assembly support, used by tools such as gas and nasm To declare targets using assembly:: def configure(conf): conf.load('gcc gas') def build(bld): bld( features='c cstlib asm', source = 'test.S', target = 'asmtest') bld( features='asm asmprogram', source = 'test.S', target = 'asmtest') Support for pure asm programs and libraries should also work:: def configure(conf): conf.load('nasm') conf.find_program('ld', 'ASLINK') def build(bld): bld( features='asm asmprogram', source = 'test.S', target = 'asmtest') """ from waflib import Task from waflib.Tools.ccroot import link_task, stlink_task from waflib.TaskGen import extension class asm(Task.Task): """ Compiles asm files by gas/nasm/yasm/... 
""" color = 'BLUE' run_str = '${AS} ${ASFLAGS} ${ASMPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${AS_SRC_F}${SRC} ${AS_TGT_F}${TGT}' @extension('.s', '.S', '.asm', '.ASM', '.spp', '.SPP') def asm_hook(self, node): """ Binds the asm extension to the asm task :param node: input file :type node: :py:class:`waflib.Node.Node` """ return self.create_compiled_task('asm', node) class asmprogram(link_task): "Links object files into a c program" run_str = '${ASLINK} ${ASLINKFLAGS} ${ASLNK_TGT_F}${TGT} ${ASLNK_SRC_F}${SRC}' ext_out = ['.bin'] inst_to = '${BINDIR}' class asmshlib(asmprogram): "Links object files into a c shared library" inst_to = '${LIBDIR}' class asmstlib(stlink_task): "Links object files into a c static library" pass # do not remove def configure(conf): conf.env.ASMPATH_ST = '-I%s' tdb-1.4.2/third_party/waf/waflib/Tools/bison.py0000660000000000000000000000224313444661622021372 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # John O'Meara, 2006 # Thomas Nagy 2009-2018 (ita) """ The **bison** program is a code generator which creates C or C++ files. The generated files are compiled into object files. """ from waflib import Task from waflib.TaskGen import extension class bison(Task.Task): """Compiles bison files""" color = 'BLUE' run_str = '${BISON} ${BISONFLAGS} ${SRC[0].abspath()} -o ${TGT[0].name}' ext_out = ['.h'] # just to make sure @extension('.y', '.yc', '.yy') def big_bison(self, node): """ Creates a bison task, which must be executed from the directory of the output file. """ has_h = '-d' in self.env.BISONFLAGS outs = [] if node.name.endswith('.yc'): outs.append(node.change_ext('.tab.cc')) if has_h: outs.append(node.change_ext('.tab.hh')) else: outs.append(node.change_ext('.tab.c')) if has_h: outs.append(node.change_ext('.tab.h')) tsk = self.create_task('bison', node, outs) tsk.cwd = node.parent.get_bld() # and the c/cxx file must be compiled too self.source.append(outs[0]) def configure(conf): """ Detects the *bison* program """ conf.find_program('bison', var='BISON') conf.env.BISONFLAGS = ['-d'] tdb-1.4.2/third_party/waf/waflib/Tools/c.py0000660000000000000000000000277113444661622020510 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) "Base for c programs/libraries" from waflib import TaskGen, Task from waflib.Tools import c_preproc from waflib.Tools.ccroot import link_task, stlink_task @TaskGen.extension('.c') def c_hook(self, node): "Binds the c file extensions create :py:class:`waflib.Tools.c.c` instances" if not self.env.CC and self.env.CXX: return self.create_compiled_task('cxx', node) return self.create_compiled_task('c', node) class c(Task.Task): "Compiles C files into object files" run_str = '${CC} ${ARCH_ST:ARCH} ${CFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${CC_SRC_F}${SRC} ${CC_TGT_F}${TGT[0].abspath()} ${CPPFLAGS}' vars = ['CCDEPS'] # unused variable to depend on, just in case ext_in = ['.h'] # set the build order easily by using ext_out=['.h'] scan = c_preproc.scan class cprogram(link_task): "Links object files into c programs" run_str = '${LINK_CC} ${LINKFLAGS} ${CCLNK_SRC_F}${SRC} ${CCLNK_TGT_F}${TGT[0].abspath()} ${RPATH_ST:RPATH} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${FRAMEWORK_ST:FRAMEWORK} ${ARCH_ST:ARCH} ${STLIB_MARKER} ${STLIBPATH_ST:STLIBPATH} ${STLIB_ST:STLIB} ${SHLIB_MARKER} ${LIBPATH_ST:LIBPATH} ${LIB_ST:LIB} ${LDFLAGS}' ext_out = ['.bin'] vars = ['LINKDEPS'] inst_to = '${BINDIR}' class cshlib(cprogram): "Links object files into c shared 
libraries" inst_to = '${LIBDIR}' class cstlib(stlink_task): "Links object files into c static libraries" pass # do not remove tdb-1.4.2/third_party/waf/waflib/Tools/c_aliases.py0000660000000000000000000000665313444661622022204 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2015 (ita) "base for all c/c++ programs and libraries" from waflib import Utils, Errors from waflib.Configure import conf def get_extensions(lst): """ Returns the file extensions for the list of files given as input :param lst: files to process :type lst: list of string or :py:class:`waflib.Node.Node` :return: list of file extensions :rtype: list of string """ ret = [] for x in Utils.to_list(lst): if not isinstance(x, str): x = x.name ret.append(x[x.rfind('.') + 1:]) return ret def sniff_features(**kw): """ Computes and returns the features required for a task generator by looking at the file extensions. This is aimed at C/C++ mainly:: sniff_features(source=['foo.c', 'foo.cxx'], type='shlib') # returns ['cxx', 'c', 'cxxshlib', 'cshlib'] :param source: source files to process :type source: list of string or :py:class:`waflib.Node.Node` :param type: object type in *program*, *shlib* or *stlib* :type type: string :return: the list of features for a task generator processing the source files :rtype: list of string """ exts = get_extensions(kw['source']) typ = kw['typ'] feats = [] # watch the order, cxx will have the precedence for x in 'cxx cpp c++ cc C'.split(): if x in exts: feats.append('cxx') break if 'c' in exts or 'vala' in exts or 'gs' in exts: feats.append('c') for x in 'f f90 F F90 for FOR'.split(): if x in exts: feats.append('fc') break if 'd' in exts: feats.append('d') if 'java' in exts: feats.append('java') return 'java' if typ in ('program', 'shlib', 'stlib'): will_link = False for x in feats: if x in ('cxx', 'd', 'fc', 'c'): feats.append(x + typ) will_link = True if not will_link and not kw.get('features', []): raise Errors.WafError('Cannot link from %r, try passing eg: features="c cprogram"?' % kw) return feats def set_features(kw, typ): """ Inserts data in the input dict *kw* based on existing data and on the type of target required (typ).
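For instance, for a single C source file the *program* type yields the feature pair needed to compile and link (a sketch of the resulting values)::

	kw = {'source': 'foo.c'}
	set_features(kw, 'program')
	# kw['typ'] == 'program'
	# kw['features'] == ['c', 'cprogram']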
:param kw: task generator parameters :type kw: dict :param typ: type of target :type typ: string """ kw['typ'] = typ kw['features'] = Utils.to_list(kw.get('features', [])) + Utils.to_list(sniff_features(**kw)) @conf def program(bld, *k, **kw): """ Alias for creating programs by looking at the file extensions:: def build(bld): bld.program(source='foo.c', target='app') # equivalent to: # bld(features='c cprogram', source='foo.c', target='app') """ set_features(kw, 'program') return bld(*k, **kw) @conf def shlib(bld, *k, **kw): """ Alias for creating shared libraries by looking at the file extensions:: def build(bld): bld.shlib(source='foo.c', target='app') # equivalent to: # bld(features='c cshlib', source='foo.c', target='app') """ set_features(kw, 'shlib') return bld(*k, **kw) @conf def stlib(bld, *k, **kw): """ Alias for creating static libraries by looking at the file extensions:: def build(bld): bld.stlib(source='foo.cpp', target='app') # equivalent to: # bld(features='cxx cxxstlib', source='foo.cpp', target='app') """ set_features(kw, 'stlib') return bld(*k, **kw) @conf def objects(bld, *k, **kw): """ Alias for creating object files by looking at the file extensions:: def build(bld): bld.objects(source='foo.c', target='app') # equivalent to: # bld(features='c', source='foo.c', target='app') """ set_features(kw, 'objects') return bld(*k, **kw) tdb-1.4.2/third_party/waf/waflib/Tools/c_config.py0000660000000000000000000012017613527011455022030 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ C/C++/D configuration helpers """ from __future__ import with_statement import os, re, shlex from waflib import Build, Utils, Task, Options, Logs, Errors, Runner from waflib.TaskGen import after_method, feature from waflib.Configure import conf WAF_CONFIG_H = 'config.h' """default name for the config.h file""" DEFKEYS = 'define_key' INCKEYS = 'include_key' SNIP_EMPTY_PROGRAM = ''' int main(int argc, char **argv) { (void)argc; (void)argv; return 0; } ''' MACRO_TO_DESTOS = { '__linux__' : 'linux', '__GNU__' : 'gnu', # hurd '__FreeBSD__' : 'freebsd', '__NetBSD__' : 'netbsd', '__OpenBSD__' : 'openbsd', '__sun' : 'sunos', '__hpux' : 'hpux', '__sgi' : 'irix', '_AIX' : 'aix', '__CYGWIN__' : 'cygwin', '__MSYS__' : 'cygwin', '_UWIN' : 'uwin', '_WIN64' : 'win32', '_WIN32' : 'win32', # Note about darwin: this is also tested with 'defined __APPLE__ && defined __MACH__' somewhere below in this file. 
'__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__' : 'darwin', '__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__' : 'darwin', # iphone '__QNX__' : 'qnx', '__native_client__' : 'nacl' # google native client platform } MACRO_TO_DEST_CPU = { '__x86_64__' : 'x86_64', '__amd64__' : 'x86_64', '__i386__' : 'x86', '__ia64__' : 'ia', '__mips__' : 'mips', '__sparc__' : 'sparc', '__alpha__' : 'alpha', '__aarch64__' : 'aarch64', '__thumb__' : 'thumb', '__arm__' : 'arm', '__hppa__' : 'hppa', '__powerpc__' : 'powerpc', '__ppc__' : 'powerpc', '__convex__' : 'convex', '__m68k__' : 'm68k', '__s390x__' : 's390x', '__s390__' : 's390', '__sh__' : 'sh', '__xtensa__' : 'xtensa', } @conf def parse_flags(self, line, uselib_store, env=None, force_static=False, posix=None): """ Parses flags from the input lines, and adds them to the relevant use variables:: def configure(conf): conf.parse_flags('-O3', 'FOO') # conf.env.CXXFLAGS_FOO = ['-O3'] # conf.env.CFLAGS_FOO = ['-O3'] :param line: flags :type line: string :param uselib_store: where to add the flags :type uselib_store: string :param env: config set or conf.env by default :type env: :py:class:`waflib.ConfigSet.ConfigSet` """ assert(isinstance(line, str)) env = env or self.env # Issue 811 and 1371 if posix is None: posix = True if '\\' in line: posix = ('\\ ' in line) or ('\\\\' in line) lex = shlex.shlex(line, posix=posix) lex.whitespace_split = True lex.commenters = '' lst = list(lex) # append_unique is not always possible # for example, apple flags may require both -arch i386 and -arch ppc uselib = uselib_store def app(var, val): env.append_value('%s_%s' % (var, uselib), val) def appu(var, val): env.append_unique('%s_%s' % (var, uselib), val) static = False while lst: x = lst.pop(0) st = x[:2] ot = x[2:] if st == '-I' or st == '/I': if not ot: ot = lst.pop(0) appu('INCLUDES', ot) elif st == '-i': tmp = [x, lst.pop(0)] app('CFLAGS', tmp) app('CXXFLAGS', tmp) elif st == '-D' or (env.CXX_NAME == 'msvc' and st == '/D'): # not perfect but.. 
if not ot: ot = lst.pop(0) app('DEFINES', ot) elif st == '-l': if not ot: ot = lst.pop(0) prefix = 'STLIB' if (force_static or static) else 'LIB' app(prefix, ot) elif st == '-L': if not ot: ot = lst.pop(0) prefix = 'STLIBPATH' if (force_static or static) else 'LIBPATH' appu(prefix, ot) elif x.startswith('/LIBPATH:'): prefix = 'STLIBPATH' if (force_static or static) else 'LIBPATH' appu(prefix, x.replace('/LIBPATH:', '')) elif x.startswith('-std='): prefix = 'CXXFLAGS' if '++' in x else 'CFLAGS' app(prefix, x) elif x.startswith('+') or x in ('-pthread', '-fPIC', '-fpic', '-fPIE', '-fpie'): app('CFLAGS', x) app('CXXFLAGS', x) app('LINKFLAGS', x) elif x == '-framework': appu('FRAMEWORK', lst.pop(0)) elif x.startswith('-F'): appu('FRAMEWORKPATH', x[2:]) elif x == '-Wl,-rpath' or x == '-Wl,-R': app('RPATH', lst.pop(0).lstrip('-Wl,')) elif x.startswith('-Wl,-R,'): app('RPATH', x[7:]) elif x.startswith('-Wl,-R'): app('RPATH', x[6:]) elif x.startswith('-Wl,-rpath,'): app('RPATH', x[11:]) elif x == '-Wl,-Bstatic' or x == '-Bstatic': static = True elif x == '-Wl,-Bdynamic' or x == '-Bdynamic': static = False elif x.startswith('-Wl') or x in ('-rdynamic', '-pie'): app('LINKFLAGS', x) elif x.startswith(('-m', '-f', '-dynamic', '-O', '-g')): # Adding the -W option breaks python builds on Openindiana app('CFLAGS', x) app('CXXFLAGS', x) elif x.startswith('-bundle'): app('LINKFLAGS', x) elif x.startswith(('-undefined', '-Xlinker')): arg = lst.pop(0) app('LINKFLAGS', [x, arg]) elif x.startswith(('-arch', '-isysroot')): tmp = [x, lst.pop(0)] app('CFLAGS', tmp) app('CXXFLAGS', tmp) app('LINKFLAGS', tmp) elif x.endswith(('.a', '.so', '.dylib', '.lib')): appu('LINKFLAGS', x) # not cool, #762 else: self.to_log('Unhandled flag %r' % x) @conf def validate_cfg(self, kw): """ Searches for the program *pkg-config* if missing, and validates the parameters to pass to :py:func:`waflib.Tools.c_config.exec_cfg`. 
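Exactly one of the *atleast_pkgconfig_version*, *modversion* and *package* parameters must be given, as in the following check_cfg sketches (the package names are only illustrative)::

	def configure(conf):
		conf.check_cfg(atleast_pkgconfig_version='0.0.0')
		conf.check_cfg(modversion='glib-2.0')
		conf.check_cfg(package='glib-2.0', args='--cflags --libs')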
:param path: the **-config program to use** (default is *pkg-config*) :type path: list of string :param msg: message to display to describe the test executed :type msg: string :param okmsg: message to display when the test is successful :type okmsg: string :param errmsg: message to display in case of error :type errmsg: string """ if not 'path' in kw: if not self.env.PKGCONFIG: self.find_program('pkg-config', var='PKGCONFIG') kw['path'] = self.env.PKGCONFIG # verify that exactly one action is requested s = ('atleast_pkgconfig_version' in kw) + ('modversion' in kw) + ('package' in kw) if s != 1: raise ValueError('exactly one of atleast_pkgconfig_version, modversion and package must be set') if not 'msg' in kw: if 'atleast_pkgconfig_version' in kw: kw['msg'] = 'Checking for pkg-config version >= %r' % kw['atleast_pkgconfig_version'] elif 'modversion' in kw: kw['msg'] = 'Checking for %r version' % kw['modversion'] else: kw['msg'] = 'Checking for %r' %(kw['package']) # let the modversion check set the okmsg to the detected version if not 'okmsg' in kw and not 'modversion' in kw: kw['okmsg'] = 'yes' if not 'errmsg' in kw: kw['errmsg'] = 'not found' # pkg-config version if 'atleast_pkgconfig_version' in kw: pass elif 'modversion' in kw: if not 'uselib_store' in kw: kw['uselib_store'] = kw['modversion'] if not 'define_name' in kw: kw['define_name'] = '%s_VERSION' % Utils.quote_define_name(kw['uselib_store']) else: if not 'uselib_store' in kw: kw['uselib_store'] = Utils.to_list(kw['package'])[0].upper() if not 'define_name' in kw: kw['define_name'] = self.have_define(kw['uselib_store']) @conf def exec_cfg(self, kw): """ Executes ``pkg-config`` or other ``-config`` applications to collect configuration flags: * if atleast_pkgconfig_version is given, check that pkg-config has the version n and return * if modversion is given, then return the module version * else, execute the *-config* program with the *args* and *variables* given, and set the flags on the *conf.env.FLAGS_name* variable :param atleast_pkgconfig_version: minimum pkg-config version to use (disable other tests) :type atleast_pkgconfig_version: string :param package: package name, for example *gtk+-2.0* :type package: string :param uselib_store: if the test is successful, define HAVE\\_*name*. It is also used to define *conf.env.FLAGS_name* variables. 
:type uselib_store: string :param modversion: if provided, return the version of the given module and define *name*\\_VERSION :type modversion: string :param args: arguments to give to *package* when retrieving flags :type args: list of string :param variables: return the values of particular variables :type variables: list of string :param define_variable: additional variables to define (also in conf.env.PKG_CONFIG_DEFINES) :type define_variable: dict(string: string) """ path = Utils.to_list(kw['path']) env = self.env.env or None if kw.get('pkg_config_path'): if not env: env = dict(self.environ) env['PKG_CONFIG_PATH'] = kw['pkg_config_path'] def define_it(): define_name = kw['define_name'] # by default, add HAVE_X to the config.h, else provide DEFINES_X for use=X if kw.get('global_define', 1): self.define(define_name, 1, False) else: self.env.append_unique('DEFINES_%s' % kw['uselib_store'], "%s=1" % define_name) if kw.get('add_have_to_env', 1): self.env[define_name] = 1 # pkg-config version if 'atleast_pkgconfig_version' in kw: cmd = path + ['--atleast-pkgconfig-version=%s' % kw['atleast_pkgconfig_version']] self.cmd_and_log(cmd, env=env) return # single version for a module if 'modversion' in kw: version = self.cmd_and_log(path + ['--modversion', kw['modversion']], env=env).strip() if not 'okmsg' in kw: kw['okmsg'] = version self.define(kw['define_name'], version) return version lst = [] + path defi = kw.get('define_variable') if not defi: defi = self.env.PKG_CONFIG_DEFINES or {} for key, val in defi.items(): lst.append('--define-variable=%s=%s' % (key, val)) static = kw.get('force_static', False) if 'args' in kw: args = Utils.to_list(kw['args']) if '--static' in args or '--static-libs' in args: static = True lst += args # tools like pkgconf expect the package argument after the -- ones -_- lst.extend(Utils.to_list(kw['package'])) # retrieving variables of a module if 'variables' in kw: v_env = kw.get('env', self.env) vars = Utils.to_list(kw['variables']) for v in vars: val = self.cmd_and_log(lst + ['--variable=' + v], env=env).strip() var = '%s_%s' % (kw['uselib_store'], v) v_env[var] = val return # so we assume the command-line will output flags to be parsed afterwards ret = self.cmd_and_log(lst, env=env) define_it() self.parse_flags(ret, kw['uselib_store'], kw.get('env', self.env), force_static=static, posix=kw.get('posix')) return ret @conf def check_cfg(self, *k, **kw): """ Checks for configuration flags using a **-config**-like program (pkg-config, sdl-config, etc). 
This wraps internal calls to :py:func:`waflib.Tools.c_config.validate_cfg` and :py:func:`waflib.Tools.c_config.exec_cfg` A few examples:: def configure(conf): conf.load('compiler_c') conf.check_cfg(package='glib-2.0', args='--libs --cflags') conf.check_cfg(package='pango') conf.check_cfg(package='pango', uselib_store='MYPANGO', args=['--cflags', '--libs']) conf.check_cfg(package='pango', args=['pango >= 0.1.0', 'pango < 9.9.9', '--cflags', '--libs'], msg="Checking for 'pango 0.1.0'") conf.check_cfg(path='sdl-config', args='--cflags --libs', package='', uselib_store='SDL') conf.check_cfg(path='mpicc', args='--showme:compile --showme:link', package='', uselib_store='OPEN_MPI', mandatory=False) # variables conf.check_cfg(package='gtk+-2.0', variables=['includedir', 'prefix'], uselib_store='FOO') print(conf.env.FOO_includedir) """ self.validate_cfg(kw) if 'msg' in kw: self.start_msg(kw['msg'], **kw) ret = None try: ret = self.exec_cfg(kw) except self.errors.WafError as e: if 'errmsg' in kw: self.end_msg(kw['errmsg'], 'YELLOW', **kw) if Logs.verbose > 1: self.to_log('Command failure: %s' % e) self.fatal('The configuration failed') else: if not ret: ret = True kw['success'] = ret if 'okmsg' in kw: self.end_msg(self.ret_msg(kw['okmsg'], kw), **kw) return ret def build_fun(bld): """ Build function that is used for running configuration tests with ``conf.check()`` """ if bld.kw['compile_filename']: node = bld.srcnode.make_node(bld.kw['compile_filename']) node.write(bld.kw['code']) o = bld(features=bld.kw['features'], source=bld.kw['compile_filename'], target='testprog') for k, v in bld.kw.items(): setattr(o, k, v) if not bld.kw.get('quiet'): bld.conf.to_log("==>\n%s\n<==" % bld.kw['code']) @conf def validate_c(self, kw): """ Pre-checks the parameters that will be given to :py:func:`waflib.Configure.run_build` :param compiler: c or cxx (tries to guess what is best) :type compiler: string :param type: cprogram, cshlib, cstlib - not required if *features are given directly* :type type: binary to create :param feature: desired features for the task generator that will execute the test, for example ``cxx cxxstlib`` :type feature: list of string :param fragment: provide a piece of code for the test (default is to let the system create one) :type fragment: string :param uselib_store: define variables after the test is executed (IMPORTANT!) 
:type uselib_store: string :param use: parameters to use for building (just like the normal *use* keyword) :type use: list of string :param define_name: define to set when the check is over :type define_name: string :param execute: execute the resulting binary :type execute: bool :param define_ret: if execute is set to True, use the execution output in both the define and the return value :type define_ret: bool :param header_name: check for a particular header :type header_name: string :param auto_add_header_name: if header_name was set, add the headers in env.INCKEYS so the next tests will include these headers :type auto_add_header_name: bool """ for x in ('type_name', 'field_name', 'function_name'): if x in kw: Logs.warn('Invalid argument %r in test' % x) if not 'build_fun' in kw: kw['build_fun'] = build_fun if not 'env' in kw: kw['env'] = self.env.derive() env = kw['env'] if not 'compiler' in kw and not 'features' in kw: kw['compiler'] = 'c' if env.CXX_NAME and Task.classes.get('cxx'): kw['compiler'] = 'cxx' if not self.env.CXX: self.fatal('a c++ compiler is required') else: if not self.env.CC: self.fatal('a c compiler is required') if not 'compile_mode' in kw: kw['compile_mode'] = 'c' if 'cxx' in Utils.to_list(kw.get('features', [])) or kw.get('compiler') == 'cxx': kw['compile_mode'] = 'cxx' if not 'type' in kw: kw['type'] = 'cprogram' if not 'features' in kw: if not 'header_name' in kw or kw.get('link_header_test', True): kw['features'] = [kw['compile_mode'], kw['type']] # "c ccprogram" else: kw['features'] = [kw['compile_mode']] else: kw['features'] = Utils.to_list(kw['features']) if not 'compile_filename' in kw: kw['compile_filename'] = 'test.c' + ((kw['compile_mode'] == 'cxx') and 'pp' or '') def to_header(dct): if 'header_name' in dct: dct = Utils.to_list(dct['header_name']) return ''.join(['#include <%s>\n' % x for x in dct]) return '' if 'framework_name' in kw: # OSX, not sure this is used anywhere fwkname = kw['framework_name'] if not 'uselib_store' in kw: kw['uselib_store'] = fwkname.upper() if not kw.get('no_header'): fwk = '%s/%s.h' % (fwkname, fwkname) if kw.get('remove_dot_h'): fwk = fwk[:-2] val = kw.get('header_name', []) kw['header_name'] = Utils.to_list(val) + [fwk] kw['msg'] = 'Checking for framework %s' % fwkname kw['framework'] = fwkname elif 'header_name' in kw: if not 'msg' in kw: kw['msg'] = 'Checking for header %s' % kw['header_name'] l = Utils.to_list(kw['header_name']) assert len(l), 'list of headers in header_name is empty' kw['code'] = to_header(kw) + SNIP_EMPTY_PROGRAM if not 'uselib_store' in kw: kw['uselib_store'] = l[0].upper() if not 'define_name' in kw: kw['define_name'] = self.have_define(l[0]) if 'lib' in kw: if not 'msg' in kw: kw['msg'] = 'Checking for library %s' % kw['lib'] if not 'uselib_store' in kw: kw['uselib_store'] = kw['lib'].upper() if 'stlib' in kw: if not 'msg' in kw: kw['msg'] = 'Checking for static library %s' % kw['stlib'] if not 'uselib_store' in kw: kw['uselib_store'] = kw['stlib'].upper() if 'fragment' in kw: # an additional code fragment may be provided to replace the predefined code # in custom headers kw['code'] = kw['fragment'] if not 'msg' in kw: kw['msg'] = 'Checking for code snippet' if not 'errmsg' in kw: kw['errmsg'] = 'no' for (flagsname,flagstype) in (('cxxflags','compiler'), ('cflags','compiler'), ('linkflags','linker')): if flagsname in kw: if not 'msg' in kw: kw['msg'] = 'Checking for %s flags %s' % (flagstype, kw[flagsname]) if not 'errmsg' in kw: kw['errmsg'] = 'no' if not 'execute' in kw: kw['execute'] = False if 
kw['execute']: kw['features'].append('test_exec') kw['chmod'] = Utils.O755 if not 'errmsg' in kw: kw['errmsg'] = 'not found' if not 'okmsg' in kw: kw['okmsg'] = 'yes' if not 'code' in kw: kw['code'] = SNIP_EMPTY_PROGRAM # if there are headers to append automatically to the next tests if self.env[INCKEYS]: kw['code'] = '\n'.join(['#include <%s>' % x for x in self.env[INCKEYS]]) + '\n' + kw['code'] # in case defines lead to very long command-lines if kw.get('merge_config_header') or env.merge_config_header: kw['code'] = '%s\n\n%s' % (self.get_config_header(), kw['code']) env.DEFINES = [] # modify the copy if not kw.get('success'): kw['success'] = None if 'define_name' in kw: self.undefine(kw['define_name']) if not 'msg' in kw: self.fatal('missing "msg" in conf.check(...)') @conf def post_check(self, *k, **kw): """ Sets the variables after a test executed in :py:func:`waflib.Tools.c_config.check` was run successfully """ is_success = 0 if kw['execute']: if kw['success'] is not None: if kw.get('define_ret'): is_success = kw['success'] else: is_success = (kw['success'] == 0) else: is_success = (kw['success'] == 0) if kw.get('define_name'): comment = kw.get('comment', '') define_name = kw['define_name'] if kw['execute'] and kw.get('define_ret') and isinstance(is_success, str): if kw.get('global_define', 1): self.define(define_name, is_success, quote=kw.get('quote', 1), comment=comment) else: if kw.get('quote', 1): succ = '"%s"' % is_success else: succ = int(is_success) val = '%s=%s' % (define_name, succ) var = 'DEFINES_%s' % kw['uselib_store'] self.env.append_value(var, val) else: if kw.get('global_define', 1): self.define_cond(define_name, is_success, comment=comment) else: var = 'DEFINES_%s' % kw['uselib_store'] self.env.append_value(var, '%s=%s' % (define_name, int(is_success))) # define conf.env.HAVE_X to 1 if kw.get('add_have_to_env', 1): if kw.get('uselib_store'): self.env[self.have_define(kw['uselib_store'])] = 1 elif kw['execute'] and kw.get('define_ret'): self.env[define_name] = is_success else: self.env[define_name] = int(is_success) if 'header_name' in kw: if kw.get('auto_add_header_name'): self.env.append_value(INCKEYS, Utils.to_list(kw['header_name'])) if is_success and 'uselib_store' in kw: from waflib.Tools import ccroot # See get_uselib_vars in ccroot.py _vars = set() for x in kw['features']: if x in ccroot.USELIB_VARS: _vars |= ccroot.USELIB_VARS[x] for k in _vars: x = k.lower() if x in kw: self.env.append_value(k + '_' + kw['uselib_store'], kw[x]) return is_success @conf def check(self, *k, **kw): """ Performs a configuration test by calling :py:func:`waflib.Configure.run_build`. For the complete list of parameters, see :py:func:`waflib.Tools.c_config.validate_c`. To force a specific compiler, pass ``compiler='c'`` or ``compiler='cxx'`` to the list of arguments Besides build targets, complete builds can be given through a build function. 
All files will be written to a temporary directory:: def build(bld): lib_node = bld.srcnode.make_node('libdir/liblc1.c') lib_node.parent.mkdir() lib_node.write('#include <stdio.h>\\nint lib_func(void) { FILE *f = fopen("foo", "r");}\\n', 'w') bld(features='c cshlib', source=[lib_node], linkflags=conf.env.EXTRA_LDFLAGS, target='liblc') conf.check(build_fun=build, msg=msg) """ self.validate_c(kw) self.start_msg(kw['msg'], **kw) ret = None try: ret = self.run_build(*k, **kw) except self.errors.ConfigurationError: self.end_msg(kw['errmsg'], 'YELLOW', **kw) if Logs.verbose > 1: raise else: self.fatal('The configuration failed') else: kw['success'] = ret ret = self.post_check(*k, **kw) if not ret: self.end_msg(kw['errmsg'], 'YELLOW', **kw) self.fatal('The configuration failed %r' % ret) else: self.end_msg(self.ret_msg(kw['okmsg'], kw), **kw) return ret class test_exec(Task.Task): """ A task that runs programs after they are built. See :py:func:`waflib.Tools.c_config.test_exec_fun`. """ color = 'PINK' def run(self): if getattr(self.generator, 'rpath', None): if getattr(self.generator, 'define_ret', False): self.generator.bld.retval = self.generator.bld.cmd_and_log([self.inputs[0].abspath()]) else: self.generator.bld.retval = self.generator.bld.exec_command([self.inputs[0].abspath()]) else: env = self.env.env or {} env.update(dict(os.environ)) for var in ('LD_LIBRARY_PATH', 'DYLD_LIBRARY_PATH', 'PATH'): env[var] = self.inputs[0].parent.abspath() + os.path.pathsep + env.get(var, '') if getattr(self.generator, 'define_ret', False): self.generator.bld.retval = self.generator.bld.cmd_and_log([self.inputs[0].abspath()], env=env) else: self.generator.bld.retval = self.generator.bld.exec_command([self.inputs[0].abspath()], env=env) @feature('test_exec') @after_method('apply_link') def test_exec_fun(self): """ The feature **test_exec** is used to create a task that will execute the binary created (link task output) during the build. The exit status will be set on the build context, so only one program may have the feature *test_exec*. This is used by configuration tests:: def configure(conf): conf.check(execute=True) """ self.create_task('test_exec', self.link_task.outputs[0]) @conf def check_cxx(self, *k, **kw): """ Runs a test with a task generator of the form:: conf.check(features='cxx cxxprogram', ...) """ kw['compiler'] = 'cxx' return self.check(*k, **kw) @conf def check_cc(self, *k, **kw): """ Runs a test with a task generator of the form:: conf.check(features='c cprogram', ...) """ kw['compiler'] = 'c' return self.check(*k, **kw) @conf def set_define_comment(self, key, comment): """ Sets a comment that will appear in the configuration header :type key: string :type comment: string """ coms = self.env.DEFINE_COMMENTS if not coms: coms = self.env.DEFINE_COMMENTS = {} coms[key] = comment or '' @conf def get_define_comment(self, key): """ Returns the comment associated to a define :type key: string """ coms = self.env.DEFINE_COMMENTS or {} return coms.get(key, '') @conf def define(self, key, val, quote=True, comment=''): """ Stores a single define and its state into ``conf.env.DEFINES``. ``True``/``False``/``None`` values are cast to integers (1/0).
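For example (the key/value pairs are illustrative)::

	conf.define('NAME', 'value')              # appends 'NAME="value"' to conf.env.DEFINES
	conf.define('NAME', 'value', quote=False) # appends 'NAME=value'
	conf.define('COUNT', 3)                   # appends 'COUNT=3' (numbers are never quoted)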
:param key: define name :type key: string :param val: value :type val: int or string :param quote: enclose strings in quotes (yes by default) :type quote: bool """ assert isinstance(key, str) if not key: return if val is True: val = 1 elif val in (False, None): val = 0 if isinstance(val, int) or isinstance(val, float): s = '%s=%s' else: s = quote and '%s="%s"' or '%s=%s' app = s % (key, str(val)) ban = key + '=' lst = self.env.DEFINES for x in lst: if x.startswith(ban): lst[lst.index(x)] = app break else: self.env.append_value('DEFINES', app) self.env.append_unique(DEFKEYS, key) self.set_define_comment(key, comment) @conf def undefine(self, key, comment=''): """ Removes a global define from ``conf.env.DEFINES`` :param key: define name :type key: string """ assert isinstance(key, str) if not key: return ban = key + '=' lst = [x for x in self.env.DEFINES if not x.startswith(ban)] self.env.DEFINES = lst self.env.append_unique(DEFKEYS, key) self.set_define_comment(key, comment) @conf def define_cond(self, key, val, comment=''): """ Conditionally defines a name:: def configure(conf): conf.define_cond('A', True) # equivalent to: # if val: conf.define('A', 1) # else: conf.undefine('A') :param key: define name :type key: string :param val: value :type val: int or string """ assert isinstance(key, str) if not key: return if val: self.define(key, 1, comment=comment) else: self.undefine(key, comment=comment) @conf def is_defined(self, key): """ Indicates whether a particular define is globally set in ``conf.env.DEFINES``. :param key: define name :type key: string :return: True if the define is set :rtype: bool """ assert key and isinstance(key, str) ban = key + '=' for x in self.env.DEFINES: if x.startswith(ban): return True return False @conf def get_define(self, key): """ Returns the value of an existing define, or None if not found :param key: define name :type key: string :rtype: string """ assert key and isinstance(key, str) ban = key + '=' for x in self.env.DEFINES: if x.startswith(ban): return x[len(ban):] return None @conf def have_define(self, key): """ Returns a variable suitable for command-line or header use by removing invalid characters and prefixing it with ``HAVE_`` :param key: define name :type key: string :return: the input key prefixed by *HAVE_* and substitute any invalid characters. :rtype: string """ return (self.env.HAVE_PAT or 'HAVE_%s') % Utils.quote_define_name(key) @conf def write_config_header(self, configfile='', guard='', top=False, defines=True, headers=False, remove=True, define_prefix=''): """ Writes a configuration header containing defines and includes:: def configure(cnf): cnf.define('A', 1) cnf.write_config_header('config.h') This function only adds include guards (if necessary), consult :py:func:`waflib.Tools.c_config.get_config_header` for details on the body. 
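With the example above, the generated file would look roughly like this (the guard name is derived from the file name by the code below)::

	/* WARNING! All changes made to this file will be lost! */
	#ifndef W_CONFIG_H_WAF
	#define W_CONFIG_H_WAF
	#define A 1
	#endif /* W_CONFIG_H_WAF */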
:param configfile: path to the file to create (relative or absolute) :type configfile: string :param guard: include guard name to add, by default it is computed from the file name :type guard: string :param top: write the configuration header from the build directory (default is from the current path) :type top: bool :param defines: add the defines (yes by default) :type defines: bool :param headers: add #include in the file :type headers: bool :param remove: remove the defines after they are added (yes by default, works like in autoconf) :type remove: bool :type define_prefix: string :param define_prefix: prefix all the defines in the file with a particular prefix """ if not configfile: configfile = WAF_CONFIG_H waf_guard = guard or 'W_%s_WAF' % Utils.quote_define_name(configfile) node = top and self.bldnode or self.path.get_bld() node = node.make_node(configfile) node.parent.mkdir() lst = ['/* WARNING! All changes made to this file will be lost! */\n'] lst.append('#ifndef %s\n#define %s\n' % (waf_guard, waf_guard)) lst.append(self.get_config_header(defines, headers, define_prefix=define_prefix)) lst.append('\n#endif /* %s */\n' % waf_guard) node.write('\n'.join(lst)) # config files must not be removed on "waf clean" self.env.append_unique(Build.CFG_FILES, [node.abspath()]) if remove: for key in self.env[DEFKEYS]: self.undefine(key) self.env[DEFKEYS] = [] @conf def get_config_header(self, defines=True, headers=False, define_prefix=''): """ Creates the contents of a ``config.h`` file from the defines and includes set in conf.env.define_key / conf.env.include_key. No include guards are added. A prelude will be added from the variable env.WAF_CONFIG_H_PRELUDE if provided. This can be used to insert complex macros or include guards:: def configure(conf): conf.env.WAF_CONFIG_H_PRELUDE = '#include \\n' conf.write_config_header('config.h') :param defines: write the defines values :type defines: bool :param headers: write include entries for each element in self.env.INCKEYS :type headers: bool :type define_prefix: string :param define_prefix: prefix all the defines with a particular prefix :return: the contents of a ``config.h`` file :rtype: string """ lst = [] if self.env.WAF_CONFIG_H_PRELUDE: lst.append(self.env.WAF_CONFIG_H_PRELUDE) if headers: for x in self.env[INCKEYS]: lst.append('#include <%s>' % x) if defines: tbl = {} for k in self.env.DEFINES: a, _, b = k.partition('=') tbl[a] = b for k in self.env[DEFKEYS]: caption = self.get_define_comment(k) if caption: caption = ' /* %s */' % caption try: txt = '#define %s%s %s%s' % (define_prefix, k, tbl[k], caption) except KeyError: txt = '/* #undef %s%s */%s' % (define_prefix, k, caption) lst.append(txt) return "\n".join(lst) @conf def cc_add_flags(conf): """ Adds CFLAGS / CPPFLAGS from os.environ to conf.env """ conf.add_os_flags('CPPFLAGS', dup=False) conf.add_os_flags('CFLAGS', dup=False) @conf def cxx_add_flags(conf): """ Adds CXXFLAGS / CPPFLAGS from os.environ to conf.env """ conf.add_os_flags('CPPFLAGS', dup=False) conf.add_os_flags('CXXFLAGS', dup=False) @conf def link_add_flags(conf): """ Adds LINKFLAGS / LDFLAGS from os.environ to conf.env """ conf.add_os_flags('LINKFLAGS', dup=False) conf.add_os_flags('LDFLAGS', dup=False) @conf def cc_load_tools(conf): """ Loads the Waf c extensions """ if not conf.env.DEST_OS: conf.env.DEST_OS = Utils.unversioned_sys_platform() conf.load('c') @conf def cxx_load_tools(conf): """ Loads the Waf c++ extensions """ if not conf.env.DEST_OS: conf.env.DEST_OS = Utils.unversioned_sys_platform() 
conf.load('cxx') @conf def get_cc_version(conf, cc, gcc=False, icc=False, clang=False): """ Runs the preprocessor to determine the gcc/icc/clang version The variables CC_VERSION, DEST_OS, DEST_BINFMT and DEST_CPU will be set in *conf.env* :raise: :py:class:`waflib.Errors.ConfigurationError` """ cmd = cc + ['-dM', '-E', '-'] env = conf.env.env or None try: out, err = conf.cmd_and_log(cmd, output=0, input='\n'.encode(), env=env) except Errors.WafError: conf.fatal('Could not determine the compiler version %r' % cmd) if gcc: if out.find('__INTEL_COMPILER') >= 0: conf.fatal('The intel compiler pretends to be gcc') if out.find('__GNUC__') < 0 and out.find('__clang__') < 0: conf.fatal('Could not determine the compiler type') if icc and out.find('__INTEL_COMPILER') < 0: conf.fatal('Not icc/icpc') if clang and out.find('__clang__') < 0: conf.fatal('Not clang/clang++') if not clang and out.find('__clang__') >= 0: conf.fatal('Could not find gcc/g++ (only Clang), if renamed try eg: CC=gcc48 CXX=g++48 waf configure') k = {} if icc or gcc or clang: out = out.splitlines() for line in out: lst = shlex.split(line) if len(lst)>2: key = lst[1] val = lst[2] k[key] = val def isD(var): return var in k # Some documentation is available at http://predef.sourceforge.net # The names given to DEST_OS must match what Utils.unversioned_sys_platform() returns. if not conf.env.DEST_OS: conf.env.DEST_OS = '' for i in MACRO_TO_DESTOS: if isD(i): conf.env.DEST_OS = MACRO_TO_DESTOS[i] break else: if isD('__APPLE__') and isD('__MACH__'): conf.env.DEST_OS = 'darwin' elif isD('__unix__'): # unix must be tested last as it's a generic fallback conf.env.DEST_OS = 'generic' if isD('__ELF__'): conf.env.DEST_BINFMT = 'elf' elif isD('__WINNT__') or isD('__CYGWIN__') or isD('_WIN32'): conf.env.DEST_BINFMT = 'pe' if not conf.env.IMPLIBDIR: conf.env.IMPLIBDIR = conf.env.LIBDIR # for .lib or .dll.a files conf.env.LIBDIR = conf.env.BINDIR elif isD('__APPLE__'): conf.env.DEST_BINFMT = 'mac-o' if not conf.env.DEST_BINFMT: # Infer the binary format from the os name. conf.env.DEST_BINFMT = Utils.destos_to_binfmt(conf.env.DEST_OS) for i in MACRO_TO_DEST_CPU: if isD(i): conf.env.DEST_CPU = MACRO_TO_DEST_CPU[i] break Logs.debug('ccroot: dest platform: ' + ' '.join([conf.env[x] or '?' for x in ('DEST_OS', 'DEST_BINFMT', 'DEST_CPU')])) if icc: ver = k['__INTEL_COMPILER'] conf.env.CC_VERSION = (ver[:-2], ver[-2], ver[-1]) else: if isD('__clang__') and isD('__clang_major__'): conf.env.CC_VERSION = (k['__clang_major__'], k['__clang_minor__'], k['__clang_patchlevel__']) else: # older clang versions and gcc conf.env.CC_VERSION = (k['__GNUC__'], k['__GNUC_MINOR__'], k.get('__GNUC_PATCHLEVEL__', '0')) return k @conf def get_xlc_version(conf, cc): """ Returns the Aix compiler version :raise: :py:class:`waflib.Errors.ConfigurationError` """ cmd = cc + ['-qversion'] try: out, err = conf.cmd_and_log(cmd, output=0) except Errors.WafError: conf.fatal('Could not find xlc %r' % cmd) # the intention is to catch the 8.0 in "IBM XL C/C++ Enterprise Edition V8.0 for AIX..." 
for v in (r"IBM XL C/C\+\+.* V(?P<major>\d*)\.(?P<minor>\d*)",): version_re = re.compile(v, re.I).search match = version_re(out or err) if match: k = match.groupdict() conf.env.CC_VERSION = (k['major'], k['minor']) break else: conf.fatal('Could not determine the XLC version.') @conf def get_suncc_version(conf, cc): """ Returns the Sun compiler version :raise: :py:class:`waflib.Errors.ConfigurationError` """ cmd = cc + ['-V'] try: out, err = conf.cmd_and_log(cmd, output=0) except Errors.WafError as e: # Older versions of the compiler exit with non-zero status when reporting their version if not (hasattr(e, 'returncode') and hasattr(e, 'stdout') and hasattr(e, 'stderr')): conf.fatal('Could not find suncc %r' % cmd) out = e.stdout err = e.stderr version = (out or err) version = version.splitlines()[0] # cc: Sun C 5.10 SunOS_i386 2009/06/03 # cc: Studio 12.5 Sun C++ 5.14 SunOS_sparc Beta 2015/11/17 # cc: WorkShop Compilers 5.0 98/12/15 C 5.0 version_re = re.compile(r'cc: (studio.*?|\s+)?(sun\s+(c\+\+|c)|(WorkShop\s+Compilers))?\s+(?P<major>\d*)\.(?P<minor>\d*)', re.I).search match = version_re(version) if match: k = match.groupdict() conf.env.CC_VERSION = (k['major'], k['minor']) else: conf.fatal('Could not determine the suncc version.') # ============ the --as-needed flag should be added during the configuration, not at runtime ========= @conf def add_as_needed(self): """ Adds ``--as-needed`` to the *LINKFLAGS* On some platforms, it is a default flag. In some cases (e.g., in NS-3) it is necessary to explicitly disable this feature with the `-Wl,--no-as-needed` flag. """ if self.env.DEST_BINFMT == 'elf' and 'gcc' in (self.env.CXX_NAME, self.env.CC_NAME): self.env.append_unique('LINKFLAGS', '-Wl,--as-needed') # ============ parallel configuration class cfgtask(Task.Task): """ A task that executes build configuration tests (calls conf.check) Make sure to use locks if concurrent access to the same conf.env data is necessary.
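Instances are created by :py:func:`waflib.Tools.c_config.multicheck` below, which also provides the shared lock; a guarded access looks like::

	with self.generator.bld.multicheck_lock:
		self.conf.start_msg(self.args['msg'])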
""" def __init__(self, *k, **kw): Task.Task.__init__(self, *k, **kw) self.run_after = set() def display(self): return '' def runnable_status(self): for x in self.run_after: if not x.hasrun: return Task.ASK_LATER return Task.RUN_ME def uid(self): return Utils.SIG_NIL def signature(self): return Utils.SIG_NIL def run(self): conf = self.conf bld = Build.BuildContext(top_dir=conf.srcnode.abspath(), out_dir=conf.bldnode.abspath()) bld.env = conf.env bld.init_dirs() bld.in_msg = 1 # suppress top-level start_msg bld.logger = self.logger bld.multicheck_task = self args = self.args try: if 'func' in args: bld.test(build_fun=args['func'], msg=args.get('msg', ''), okmsg=args.get('okmsg', ''), errmsg=args.get('errmsg', ''), ) else: args['multicheck_mandatory'] = args.get('mandatory', True) args['mandatory'] = True try: bld.check(**args) finally: args['mandatory'] = args['multicheck_mandatory'] except Exception: return 1 def process(self): Task.Task.process(self) if 'msg' in self.args: with self.generator.bld.multicheck_lock: self.conf.start_msg(self.args['msg']) if self.hasrun == Task.NOT_RUN: self.conf.end_msg('test cancelled', 'YELLOW') elif self.hasrun != Task.SUCCESS: self.conf.end_msg(self.args.get('errmsg', 'no'), 'YELLOW') else: self.conf.end_msg(self.args.get('okmsg', 'yes'), 'GREEN') @conf def multicheck(self, *k, **kw): """ Runs configuration tests in parallel; results are printed sequentially at the end of the build but each test must provide its own msg value to display a line:: def test_build(ctx): ctx.in_msg = True # suppress console outputs ctx.check_large_file(mandatory=False) conf.multicheck( {'header_name':'stdio.h', 'msg':'... stdio', 'uselib_store':'STDIO', 'global_define':False}, {'header_name':'xyztabcd.h', 'msg':'... optional xyztabcd.h', 'mandatory': False}, {'header_name':'stdlib.h', 'msg':'... stdlib', 'okmsg': 'aye', 'errmsg': 'nope'}, {'func': test_build, 'msg':'... testing an arbitrary build function', 'okmsg':'ok'}, msg = 'Checking for headers in parallel', mandatory = True, # mandatory tests raise an error at the end run_all_tests = True, # try running all tests ) The configuration tests may modify the values in conf.env in any order, and the define values can affect configuration tests being executed. It is hence recommended to provide `uselib_store` values with `global_define=False` to prevent such issues. 
""" self.start_msg(kw.get('msg', 'Executing %d configuration tests' % len(k)), **kw) # Force a copy so that threads append to the same list at least # no order is guaranteed, but the values should not disappear at least for var in ('DEFINES', DEFKEYS): self.env.append_value(var, []) self.env.DEFINE_COMMENTS = self.env.DEFINE_COMMENTS or {} # define a task object that will execute our tests class par(object): def __init__(self): self.keep = False self.task_sigs = {} self.progress_bar = 0 def total(self): return len(tasks) def to_log(self, *k, **kw): return bld = par() bld.keep = kw.get('run_all_tests', True) bld.imp_sigs = {} tasks = [] id_to_task = {} for dct in k: x = Task.classes['cfgtask'](bld=bld, env=None) tasks.append(x) x.args = dct x.bld = bld x.conf = self x.args = dct # bind a logger that will keep the info in memory x.logger = Logs.make_mem_logger(str(id(x)), self.logger) if 'id' in dct: id_to_task[dct['id']] = x # second pass to set dependencies with after_test/before_test for x in tasks: for key in Utils.to_list(x.args.get('before_tests', [])): tsk = id_to_task[key] if not tsk: raise ValueError('No test named %r' % key) tsk.run_after.add(x) for key in Utils.to_list(x.args.get('after_tests', [])): tsk = id_to_task[key] if not tsk: raise ValueError('No test named %r' % key) x.run_after.add(tsk) def it(): yield tasks while 1: yield [] bld.producer = p = Runner.Parallel(bld, Options.options.jobs) bld.multicheck_lock = Utils.threading.Lock() p.biter = it() self.end_msg('started') p.start() # flush the logs in order into the config.log for x in tasks: x.logger.memhandler.flush() self.start_msg('-> processing test results') if p.error: for x in p.error: if getattr(x, 'err_msg', None): self.to_log(x.err_msg) self.end_msg('fail', color='RED') raise Errors.WafError('There is an error in the library, read config.log for more information') failure_count = 0 for x in tasks: if x.hasrun not in (Task.SUCCESS, Task.NOT_RUN): failure_count += 1 if failure_count: self.end_msg(kw.get('errmsg', '%s test failed' % failure_count), color='YELLOW', **kw) else: self.end_msg('all ok', **kw) for x in tasks: if x.hasrun != Task.SUCCESS: if x.args.get('mandatory', True): self.fatal(kw.get('fatalmsg') or 'One of the tests has failed, read config.log for more information') @conf def check_gcc_o_space(self, mode='c'): if int(self.env.CC_VERSION[0]) > 4: # this is for old compilers return self.env.stash() if mode == 'c': self.env.CCLNK_TGT_F = ['-o', ''] elif mode == 'cxx': self.env.CXXLNK_TGT_F = ['-o', ''] features = '%s %sshlib' % (mode, mode) try: self.check(msg='Checking if the -o link must be split from arguments', fragment=SNIP_EMPTY_PROGRAM, features=features) except self.errors.ConfigurationError: self.env.revert() else: self.env.commit() tdb-1.4.2/third_party/waf/waflib/Tools/c_osx.py0000660000000000000000000001332613444661622021377 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy 2008-2018 (ita) """ MacOSX related tools """ import os, shutil, platform from waflib import Task, Utils from waflib.TaskGen import taskgen_method, feature, after_method, before_method app_info = ''' CFBundlePackageType APPL CFBundleGetInfoString Created by Waf CFBundleSignature ???? 
NOTE THIS IS A GENERATED FILE, DO NOT MODIFY CFBundleExecutable {app_name} ''' """ plist template """ @feature('c', 'cxx') def set_macosx_deployment_target(self): """ see WAF issue 285 and also and also http://trac.macports.org/ticket/17059 """ if self.env.MACOSX_DEPLOYMENT_TARGET: os.environ['MACOSX_DEPLOYMENT_TARGET'] = self.env.MACOSX_DEPLOYMENT_TARGET elif 'MACOSX_DEPLOYMENT_TARGET' not in os.environ: if Utils.unversioned_sys_platform() == 'darwin': os.environ['MACOSX_DEPLOYMENT_TARGET'] = '.'.join(platform.mac_ver()[0].split('.')[:2]) @taskgen_method def create_bundle_dirs(self, name, out): """ Creates bundle folders, used by :py:func:`create_task_macplist` and :py:func:`create_task_macapp` """ dir = out.parent.find_or_declare(name) dir.mkdir() macos = dir.find_or_declare(['Contents', 'MacOS']) macos.mkdir() return dir def bundle_name_for_output(out): name = out.name k = name.rfind('.') if k >= 0: name = name[:k] + '.app' else: name = name + '.app' return name @feature('cprogram', 'cxxprogram') @after_method('apply_link') def create_task_macapp(self): """ To compile an executable into a Mac application (a .app), set its *mac_app* attribute:: def build(bld): bld.shlib(source='a.c', target='foo', mac_app=True) To force *all* executables to be transformed into Mac applications:: def build(bld): bld.env.MACAPP = True bld.shlib(source='a.c', target='foo') """ if self.env.MACAPP or getattr(self, 'mac_app', False): out = self.link_task.outputs[0] name = bundle_name_for_output(out) dir = self.create_bundle_dirs(name, out) n1 = dir.find_or_declare(['Contents', 'MacOS', out.name]) self.apptask = self.create_task('macapp', self.link_task.outputs, n1) inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/MacOS/' % name self.add_install_files(install_to=inst_to, install_from=n1, chmod=Utils.O755) if getattr(self, 'mac_files', None): # this only accepts files; they will be installed as seen from mac_files_root mac_files_root = getattr(self, 'mac_files_root', None) if isinstance(mac_files_root, str): mac_files_root = self.path.find_node(mac_files_root) if not mac_files_root: self.bld.fatal('Invalid mac_files_root %r' % self.mac_files_root) res_dir = n1.parent.parent.make_node('Resources') inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Resources' % name for node in self.to_nodes(self.mac_files): relpath = node.path_from(mac_files_root or node.parent) self.create_task('macapp', node, res_dir.make_node(relpath)) self.add_install_as(install_to=os.path.join(inst_to, relpath), install_from=node) if getattr(self.bld, 'is_install', None): # disable regular binary installation self.install_task.hasrun = Task.SKIP_ME @feature('cprogram', 'cxxprogram') @after_method('apply_link') def create_task_macplist(self): """ Creates a :py:class:`waflib.Tools.c_osx.macplist` instance. 
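The *mac_plist* attribute may name a plist file to use instead of the default *app_info* template above, or hold the plist text itself (the file name below is illustrative)::

	def build(bld):
		bld.program(source='a.c', target='foo', mac_app=True, mac_plist='src/Info.plist')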
""" if self.env.MACAPP or getattr(self, 'mac_app', False): out = self.link_task.outputs[0] name = bundle_name_for_output(out) dir = self.create_bundle_dirs(name, out) n1 = dir.find_or_declare(['Contents', 'Info.plist']) self.plisttask = plisttask = self.create_task('macplist', [], n1) plisttask.context = { 'app_name': self.link_task.outputs[0].name, 'env': self.env } plist_ctx = getattr(self, 'plist_context', None) if (plist_ctx): plisttask.context.update(plist_ctx) if getattr(self, 'mac_plist', False): node = self.path.find_resource(self.mac_plist) if node: plisttask.inputs.append(node) else: plisttask.code = self.mac_plist else: plisttask.code = app_info inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/' % name self.add_install_files(install_to=inst_to, install_from=n1) @feature('cshlib', 'cxxshlib') @before_method('apply_link', 'propagate_uselib_vars') def apply_bundle(self): """ To make a bundled shared library (a ``.bundle``), set the *mac_bundle* attribute:: def build(bld): bld.shlib(source='a.c', target='foo', mac_bundle = True) To force *all* executables to be transformed into bundles:: def build(bld): bld.env.MACBUNDLE = True bld.shlib(source='a.c', target='foo') """ if self.env.MACBUNDLE or getattr(self, 'mac_bundle', False): self.env.LINKFLAGS_cshlib = self.env.LINKFLAGS_cxxshlib = [] # disable the '-dynamiclib' flag self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.macbundle_PATTERN use = self.use = self.to_list(getattr(self, 'use', [])) if not 'MACBUNDLE' in use: use.append('MACBUNDLE') app_dirs = ['Contents', 'Contents/MacOS', 'Contents/Resources'] class macapp(Task.Task): """ Creates mac applications """ color = 'PINK' def run(self): self.outputs[0].parent.mkdir() shutil.copy2(self.inputs[0].srcpath(), self.outputs[0].abspath()) class macplist(Task.Task): """ Creates plist files """ color = 'PINK' ext_in = ['.bin'] def run(self): if getattr(self, 'code', None): txt = self.code else: txt = self.inputs[0].read() context = getattr(self, 'context', {}) txt = txt.format(**context) self.outputs[0].write(txt) tdb-1.4.2/third_party/waf/waflib/Tools/c_preproc.py0000660000000000000000000006606513527011455022243 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) """ C/C++ preprocessor for finding dependencies Reasons for using the Waf preprocessor by default #. Some c/c++ extensions (Qt) require a custom preprocessor for obtaining the dependencies (.moc files) #. Not all compilers provide .d files for obtaining the dependencies (portability) #. A naive file scanner will not catch the constructs such as "#include foo()" #. A naive file scanner will catch unnecessary dependencies (change an unused header -> recompile everything) Regarding the speed concerns: * the preprocessing is performed only when files must be compiled * the macros are evaluated only for #if/#elif/#include * system headers are not scanned by default Now if you do not want the Waf preprocessor, the tool +gccdeps* uses the .d files produced during the compilation to track the dependencies (useful when used with the boost libraries). It only works with gcc >= 4.4 though. 
A dumb preprocessor is also available in the tool *c_dumbpreproc* """ # TODO: more varargs, pragma once import re, string, traceback from waflib import Logs, Utils, Errors class PreprocError(Errors.WafError): pass FILE_CACHE_SIZE = 100000 LINE_CACHE_SIZE = 100000 POPFILE = '-' "Constant representing a special token used in :py:meth:`waflib.Tools.c_preproc.c_parser.start` iteration to switch to a header read previously" recursion_limit = 150 "Limit on the amount of files to read in the dependency scanner" go_absolute = False "Set to True to track headers on files in /usr/include, else absolute paths are ignored (but it becomes very slow)" standard_includes = ['/usr/local/include', '/usr/include'] if Utils.is_win32: standard_includes = [] use_trigraphs = 0 """Apply trigraph rules (False by default)""" # obsolete, do not use strict_quotes = 0 g_optrans = { 'not':'!', 'not_eq':'!', 'and':'&&', 'and_eq':'&=', 'or':'||', 'or_eq':'|=', 'xor':'^', 'xor_eq':'^=', 'bitand':'&', 'bitor':'|', 'compl':'~', } """Operators such as and/or/xor for c++. Set an empty dict to disable.""" # ignore #warning and #error re_lines = re.compile( '^[ \t]*(?:#|%:)[ \t]*(ifdef|ifndef|if|else|elif|endif|include|import|define|undef|pragma)[ \t]*(.*)\r*$', re.IGNORECASE | re.MULTILINE) """Match #include lines""" re_mac = re.compile(r"^[a-zA-Z_]\w*") """Match macro definitions""" re_fun = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*[(]') """Match macro functions""" re_pragma_once = re.compile(r'^\s*once\s*', re.IGNORECASE) """Match #pragma once statements""" re_nl = re.compile('\\\\\r*\n', re.MULTILINE) """Match newlines""" re_cpp = re.compile(r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"', re.DOTALL | re.MULTILINE ) """Filter C/C++ comments""" trig_def = [('??'+a, b) for a, b in zip("=-/!'()<>", r'#~\|^[]{}')] """Trigraph definitions""" chr_esc = {'0':0, 'a':7, 'b':8, 't':9, 'n':10, 'f':11, 'v':12, 'r':13, '\\':92, "'":39} """Escape characters""" NUM = 'i' """Number token""" OP = 'O' """Operator token""" IDENT = 'T' """Identifier token""" STR = 's' """String token""" CHAR = 'c' """Character token""" tok_types = [NUM, STR, IDENT, OP] """Token types""" exp_types = [ r"""0[xX](?P<hex>[a-fA-F0-9]+)(?P<qual1>[uUlL]*)|L*?'(?P<char>(\\.|[^\\'])+)'|(?P<n1>\d+)[Ee](?P<exp0>[+-]*?\d+)(?P<float0>[fFlL]*)|(?P<n2>\d*\.\d+)([Ee](?P<exp1>[+-]*?\d+))?(?P<float1>[fFlL]*)|(?P<n4>\d+\.\d*)([Ee](?P<exp2>[+-]*?\d+))?(?P<float2>[fFlL]*)|(?P<oct>0*)(?P<n0>\d+)(?P<qual2>[uUlL]*)""", r'L?"([^"\\]|\\.)*"', r'[a-zA-Z_]\w*', r'%:%:|<<=|>>=|\.\.\.|<<|<%|<:|<=|>>|>=|\+\+|\+=|--|->|-=|\*=|/=|%:|%=|%>|==|&&|&=|\|\||\|=|\^=|:>|!=|##|[\(\)\{\}\[\]<>\?\|\^\*\+&=:!#;,%/\-\?\~\.]', ] """Expression types""" re_clexer = re.compile('|'.join(["(?P<%s>%s)" % (name, part) for name, part in zip(tok_types, exp_types)]), re.M) """Match expressions into tokens""" accepted = 'a' """Parser state is *accepted*""" ignored = 'i' """Parser state is *ignored*, for example preprocessor lines in an #if 0 block""" undefined = 'u' """Parser state is *undefined* at the moment""" skipped = 's' """Parser state is *skipped*, for example preprocessor lines in a #elif 0 block""" def repl(m): """Replace function used with :py:attr:`waflib.Tools.c_preproc.re_cpp`""" s = m.group() if s[0] == '/': return ' ' return s prec = {} """ Operator precedence rules required for parsing expressions of the form:: #if 1 && 2 != 0 """ ops = ['* / %', '+ -', '<< >>', '< <= >= >', '== !=', '& | ^', '&& ||', ','] for x, syms in enumerate(ops): for u in syms.split(): prec[u] = x def reduce_nums(val_1, val_2, val_op): """ Apply arithmetic rules to compute a result :param val1: input
parameter :type val1: int or string :param val2: input parameter :type val2: int or string :param val_op: C operator in *+*, */*, *-*, etc :type val_op: string :rtype: int """ #print val_1, val_2, val_op # now perform the operation, make certain a and b are numeric try: a = 0 + val_1 except TypeError: a = int(val_1) try: b = 0 + val_2 except TypeError: b = int(val_2) d = val_op if d == '%': c = a % b elif d=='+': c = a + b elif d=='-': c = a - b elif d=='*': c = a * b elif d=='/': c = a / b elif d=='^': c = a ^ b elif d=='==': c = int(a == b) elif d=='|' or d == 'bitor': c = a | b elif d=='||' or d == 'or' : c = int(a or b) elif d=='&' or d == 'bitand': c = a & b elif d=='&&' or d == 'and': c = int(a and b) elif d=='!=' or d == 'not_eq': c = int(a != b) elif d=='^' or d == 'xor': c = int(a^b) elif d=='<=': c = int(a <= b) elif d=='<': c = int(a < b) elif d=='>': c = int(a > b) elif d=='>=': c = int(a >= b) elif d=='<<': c = a << b elif d=='>>': c = a >> b else: c = 0 return c def get_num(lst): """ Try to obtain a number from a list of tokens. The token types are defined in :py:attr:`waflib.Tools.ccroot.tok_types`. :param lst: list of preprocessor tokens :type lst: list of tuple (tokentype, value) :return: a pair containing the number and the rest of the list :rtype: tuple(value, list) """ if not lst: raise PreprocError('empty list for get_num') (p, v) = lst[0] if p == OP: if v == '(': count_par = 1 i = 1 while i < len(lst): (p, v) = lst[i] if p == OP: if v == ')': count_par -= 1 if count_par == 0: break elif v == '(': count_par += 1 i += 1 else: raise PreprocError('rparen expected %r' % lst) (num, _) = get_term(lst[1:i]) return (num, lst[i+1:]) elif v == '+': return get_num(lst[1:]) elif v == '-': num, lst = get_num(lst[1:]) return (reduce_nums('-1', num, '*'), lst) elif v == '!': num, lst = get_num(lst[1:]) return (int(not int(num)), lst) elif v == '~': num, lst = get_num(lst[1:]) return (~ int(num), lst) else: raise PreprocError('Invalid op token %r for get_num' % lst) elif p == NUM: return v, lst[1:] elif p == IDENT: # all macros should have been replaced, remaining identifiers eval to 0 return 0, lst[1:] else: raise PreprocError('Invalid token %r for get_num' % lst) def get_term(lst): """ Evaluate an expression recursively, for example:: 1+1+1 -> 2+1 -> 3 :param lst: list of tokens :type lst: list of tuple(token, value) :return: the value and the remaining tokens :rtype: value, list """ if not lst: raise PreprocError('empty list for get_term') num, lst = get_num(lst) if not lst: return (num, []) (p, v) = lst[0] if p == OP: if v == ',': # skip return get_term(lst[1:]) elif v == '?': count_par = 0 i = 1 while i < len(lst): (p, v) = lst[i] if p == OP: if v == ')': count_par -= 1 elif v == '(': count_par += 1 elif v == ':': if count_par == 0: break i += 1 else: raise PreprocError('rparen expected %r' % lst) if int(num): return get_term(lst[1:i]) else: return get_term(lst[i+1:]) else: num2, lst = get_num(lst[1:]) if not lst: # no more tokens to process num2 = reduce_nums(num, num2, v) return get_term([(NUM, num2)] + lst) # operator precedence p2, v2 = lst[0] if p2 != OP: raise PreprocError('op expected %r' % lst) if prec[v2] >= prec[v]: num2 = reduce_nums(num, num2, v) return get_term([(NUM, num2)] + lst) else: num3, lst = get_num(lst[1:]) num3 = reduce_nums(num2, num3, v2) return get_term([(NUM, num), (p, v), (NUM, num3)] + lst) raise PreprocError('cannot reduce %r' % lst) def reduce_eval(lst): """ Take a list of tokens and output true or false for #if/#elif conditions. 
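For example, with the NUM/OP token constants defined above::

	reduce_eval([(NUM, 1), (OP, '+'), (NUM, 1)])
	# -> (NUM, 2), i.e. ('i', 2)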
:param lst: a list of tokens :type lst: list of tuple(token, value) :return: a token :rtype: tuple(NUM, int) """ num, lst = get_term(lst) return (NUM, num) def stringize(lst): """ Merge a list of tokens into a string :param lst: a list of tokens :type lst: list of tuple(token, value) :rtype: string """ lst = [str(v2) for (p2, v2) in lst] return "".join(lst) def paste_tokens(t1, t2): """ Token pasting works between identifiers, particular operators, and identifiers and numbers:: a ## b -> ab > ## = -> >= a ## 2 -> a2 :param t1: token :type t1: tuple(type, value) :param t2: token :type t2: tuple(type, value) """ p1 = None if t1[0] == OP and t2[0] == OP: p1 = OP elif t1[0] == IDENT and (t2[0] == IDENT or t2[0] == NUM): p1 = IDENT elif t1[0] == NUM and t2[0] == NUM: p1 = NUM if not p1: raise PreprocError('tokens do not make a valid paste %r and %r' % (t1, t2)) return (p1, t1[1] + t2[1]) def reduce_tokens(lst, defs, ban=[]): """ Replace the tokens in lst, using the macros provided in defs, and a list of macros that cannot be re-applied :param lst: list of tokens :type lst: list of tuple(token, value) :param defs: macro definitions :type defs: dict :param ban: macros that cannot be substituted (recursion is not allowed) :type ban: list of string :return: the new list of tokens :rtype: value, list """ i = 0 while i < len(lst): (p, v) = lst[i] if p == IDENT and v == "defined": del lst[i] if i < len(lst): (p2, v2) = lst[i] if p2 == IDENT: if v2 in defs: lst[i] = (NUM, 1) else: lst[i] = (NUM, 0) elif p2 == OP and v2 == '(': del lst[i] (p2, v2) = lst[i] del lst[i] # remove the ident, and change the ) for the value if v2 in defs: lst[i] = (NUM, 1) else: lst[i] = (NUM, 0) else: raise PreprocError('Invalid define expression %r' % lst) elif p == IDENT and v in defs: if isinstance(defs[v], str): a, b = extract_macro(defs[v]) defs[v] = b macro_def = defs[v] to_add = macro_def[1] if isinstance(macro_def[0], list): # macro without arguments del lst[i] accu = to_add[:] reduce_tokens(accu, defs, ban+[v]) for tmp in accu: lst.insert(i, tmp) i += 1 else: # collect the arguments for the funcall args = [] del lst[i] if i >= len(lst): raise PreprocError('expected ( after %r (got nothing)' % v) (p2, v2) = lst[i] if p2 != OP or v2 != '(': raise PreprocError('expected ( after %r' % v) del lst[i] one_param = [] count_paren = 0 while i < len(lst): p2, v2 = lst[i] del lst[i] if p2 == OP and count_paren == 0: if v2 == '(': one_param.append((p2, v2)) count_paren += 1 elif v2 == ')': if one_param: args.append(one_param) break elif v2 == ',': if not one_param: raise PreprocError('empty param in funcall %r' % v) args.append(one_param) one_param = [] else: one_param.append((p2, v2)) else: one_param.append((p2, v2)) if v2 == '(': count_paren += 1 elif v2 == ')': count_paren -= 1 else: raise PreprocError('malformed macro') # substitute the arguments within the define expression accu = [] arg_table = macro_def[0] j = 0 while j < len(to_add): (p2, v2) = to_add[j] if p2 == OP and v2 == '#': # stringize is for arguments only if j+1 < len(to_add) and to_add[j+1][0] == IDENT and to_add[j+1][1] in arg_table: toks = args[arg_table[to_add[j+1][1]]] accu.append((STR, stringize(toks))) j += 1 else: accu.append((p2, v2)) elif p2 == OP and v2 == '##': # token pasting, how can man invent such a complicated system? 
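The pasting rule is easy to misread in isolation; as a sketch, the type checks in ``paste_tokens`` boil down to the following (token tuples are ``(type, value)`` with the one-letter type codes defined earlier; ``paste`` is a local stand-in for illustration)::

    NUM, OP, IDENT = 'i', 'O', 'T'

    def paste(t1, t2):
        kind = None
        if t1[0] == OP and t2[0] == OP:
            kind = OP            # '>' ## '='  ->  '>='
        elif t1[0] == IDENT and t2[0] in (IDENT, NUM):
            kind = IDENT         # a ## b -> ab,  a ## 2 -> a2
        elif t1[0] == NUM and t2[0] == NUM:
            kind = NUM           # 1 ## 2 -> 12
        if kind is None:
            raise ValueError('tokens do not make a valid paste %r and %r' % (t1, t2))
        return (kind, str(t1[1]) + str(t2[1]))

    print(paste((IDENT, 'a'), (NUM, 2)))   # ('T', 'a2')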
if accu and j+1 < len(to_add): # we have at least two tokens t1 = accu[-1] if to_add[j+1][0] == IDENT and to_add[j+1][1] in arg_table: toks = args[arg_table[to_add[j+1][1]]] if toks: accu[-1] = paste_tokens(t1, toks[0]) #(IDENT, accu[-1][1] + toks[0][1]) accu.extend(toks[1:]) else: # error, case "a##" accu.append((p2, v2)) accu.extend(toks) elif to_add[j+1][0] == IDENT and to_add[j+1][1] == '__VA_ARGS__': # first collect the tokens va_toks = [] st = len(macro_def[0]) pt = len(args) for x in args[pt-st+1:]: va_toks.extend(x) va_toks.append((OP, ',')) if va_toks: va_toks.pop() # extra comma if len(accu)>1: (p3, v3) = accu[-1] (p4, v4) = accu[-2] if v3 == '##': # remove the token paste accu.pop() if v4 == ',' and pt < st: # remove the comma accu.pop() accu += va_toks else: accu[-1] = paste_tokens(t1, to_add[j+1]) j += 1 else: # Invalid paste, case "##a" or "b##" accu.append((p2, v2)) elif p2 == IDENT and v2 in arg_table: toks = args[arg_table[v2]] reduce_tokens(toks, defs, ban+[v]) accu.extend(toks) else: accu.append((p2, v2)) j += 1 reduce_tokens(accu, defs, ban+[v]) for x in range(len(accu)-1, -1, -1): lst.insert(i, accu[x]) i += 1 def eval_macro(lst, defs): """ Reduce the tokens by :py:func:`waflib.Tools.c_preproc.reduce_tokens` and try to return a 0/1 result by :py:func:`waflib.Tools.c_preproc.reduce_eval`. :param lst: list of tokens :type lst: list of tuple(token, value) :param defs: macro definitions :type defs: dict :rtype: int """ reduce_tokens(lst, defs, []) if not lst: raise PreprocError('missing tokens to evaluate') if lst: p, v = lst[0] if p == IDENT and v not in defs: raise PreprocError('missing macro %r' % lst) p, v = reduce_eval(lst) return int(v) != 0 def extract_macro(txt): """ Process a macro definition of the form:: #define f(x, y) x * y into a function or a simple macro without arguments :param txt: expression to exact a macro definition from :type txt: string :return: a tuple containing the name, the list of arguments and the replacement :rtype: tuple(string, [list, list]) """ t = tokenize(txt) if re_fun.search(txt): p, name = t[0] p, v = t[1] if p != OP: raise PreprocError('expected (') i = 1 pindex = 0 params = {} prev = '(' while 1: i += 1 p, v = t[i] if prev == '(': if p == IDENT: params[v] = pindex pindex += 1 prev = p elif p == OP and v == ')': break else: raise PreprocError('unexpected token (3)') elif prev == IDENT: if p == OP and v == ',': prev = v elif p == OP and v == ')': break else: raise PreprocError('comma or ... expected') elif prev == ',': if p == IDENT: params[v] = pindex pindex += 1 prev = p elif p == OP and v == '...': raise PreprocError('not implemented (1)') else: raise PreprocError('comma or ... 
expected (2)') elif prev == '...': raise PreprocError('not implemented (2)') else: raise PreprocError('unexpected else') #~ print (name, [params, t[i+1:]]) return (name, [params, t[i+1:]]) else: (p, v) = t[0] if len(t) > 1: return (v, [[], t[1:]]) else: # empty define, assign an empty token return (v, [[], [('T','')]]) re_include = re.compile(r'^\s*(<(?:.*)>|"(?:.*)")') def extract_include(txt, defs): """ Process a line in the form:: #include foo :param txt: include line to process :type txt: string :param defs: macro definitions :type defs: dict :return: the file name :rtype: string """ m = re_include.search(txt) if m: txt = m.group(1) return txt[0], txt[1:-1] # perform preprocessing and look at the result, it must match an include toks = tokenize(txt) reduce_tokens(toks, defs, ['waf_include']) if not toks: raise PreprocError('could not parse include %r' % txt) if len(toks) == 1: if toks[0][0] == STR: return '"', toks[0][1] else: if toks[0][1] == '<' and toks[-1][1] == '>': ret = '<', stringize(toks).lstrip('<').rstrip('>') return ret raise PreprocError('could not parse include %r' % txt) def parse_char(txt): """ Parse a c character :param txt: character to parse :type txt: string :return: a character literal :rtype: string """ if not txt: raise PreprocError('attempted to parse a null char') if txt[0] != '\\': return ord(txt) c = txt[1] if c == 'x': if len(txt) == 4 and txt[3] in string.hexdigits: return int(txt[2:], 16) return int(txt[2:], 16) elif c.isdigit(): if c == '0' and len(txt)==2: return 0 for i in 3, 2, 1: if len(txt) > i and txt[1:1+i].isdigit(): return (1+i, int(txt[1:1+i], 8)) else: try: return chr_esc[c] except KeyError: raise PreprocError('could not parse char literal %r' % txt) def tokenize(s): """ Convert a string into a list of tokens (shlex.split does not apply to c/c++/d) :param s: input to tokenize :type s: string :return: a list of tokens :rtype: list of tuple(token, value) """ return tokenize_private(s)[:] # force a copy of the results def tokenize_private(s): ret = [] for match in re_clexer.finditer(s): m = match.group for name in tok_types: v = m(name) if v: if name == IDENT: if v in g_optrans: name = OP elif v.lower() == "true": v = 1 name = NUM elif v.lower() == "false": v = 0 name = NUM elif name == NUM: if m('oct'): v = int(v, 8) elif m('hex'): v = int(m('hex'), 16) elif m('n0'): v = m('n0') else: v = m('char') if v: v = parse_char(v) else: v = m('n2') or m('n4') elif name == OP: if v == '%:': v = '#' elif v == '%:%:': v = '##' elif name == STR: # remove the quotes around the string v = v[1:-1] ret.append((name, v)) break return ret def format_defines(lst): ret = [] for y in lst: if y: pos = y.find('=') if pos == -1: # "-DFOO" should give "#define FOO 1" ret.append(y) elif pos > 0: # all others are assumed to be -DX=Y ret.append('%s %s' % (y[:pos], y[pos+1:])) else: raise ValueError('Invalid define expression %r' % y) return ret class c_parser(object): """ Used by :py:func:`waflib.Tools.c_preproc.scan` to parse c/h files. Note that by default, only project headers are parsed. 
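For the common case, ``extract_include`` never needs to tokenize: the regex fast path classifies the include kind directly. A standalone illustration using the same ``re_include`` pattern (``classify_include`` is a name invented for the demo)::

    import re
    re_include = re.compile(r'^\s*(<(?:.*)>|"(?:.*)")')

    def classify_include(txt):
        # mirrors the fast path of extract_include(); macro-built includes
        # (e.g. #include CONFIG_H) would fall through to tokenization instead
        m = re_include.search(txt)
        if m:
            txt = m.group(1)
            return txt[0], txt[1:-1]
        raise ValueError('not a literal include: %r' % txt)

    print(classify_include(' "config.h"'))  # ('"', 'config.h')
    print(classify_include(' <stdio.h>'))   # ('<', 'stdio.h')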
""" def __init__(self, nodepaths=None, defines=None): self.lines = [] """list of lines read""" if defines is None: self.defs = {} else: self.defs = dict(defines) # make a copy self.state = [] self.count_files = 0 self.currentnode_stack = [] self.nodepaths = nodepaths or [] """Include paths""" self.nodes = [] """List of :py:class:`waflib.Node.Node` found so far""" self.names = [] """List of file names that could not be matched by any file""" self.curfile = '' """Current file""" self.ban_includes = set() """Includes that must not be read (#pragma once)""" self.listed = set() """Include nodes/names already listed to avoid duplicates in self.nodes/self.names""" def cached_find_resource(self, node, filename): """ Find a file from the input directory :param node: directory :type node: :py:class:`waflib.Node.Node` :param filename: header to find :type filename: string :return: the node if found, or None :rtype: :py:class:`waflib.Node.Node` """ try: cache = node.ctx.preproc_cache_node except AttributeError: cache = node.ctx.preproc_cache_node = Utils.lru_cache(FILE_CACHE_SIZE) key = (node, filename) try: return cache[key] except KeyError: ret = node.find_resource(filename) if ret: if getattr(ret, 'children', None): ret = None elif ret.is_child_of(node.ctx.bldnode): tmp = node.ctx.srcnode.search_node(ret.path_from(node.ctx.bldnode)) if tmp and getattr(tmp, 'children', None): ret = None cache[key] = ret return ret def tryfind(self, filename, kind='"', env=None): """ Try to obtain a node from the filename based from the include paths. Will add the node found to :py:attr:`waflib.Tools.c_preproc.c_parser.nodes` or the file name to :py:attr:`waflib.Tools.c_preproc.c_parser.names` if no corresponding file is found. Called by :py:attr:`waflib.Tools.c_preproc.c_parser.start`. :param filename: header to find :type filename: string :return: the node if found :rtype: :py:class:`waflib.Node.Node` """ if filename.endswith('.moc'): # we could let the qt4 module use a subclass, but then the function "scan" below must be duplicated # in the qt4 and in the qt5 classes. So we have two lines here and it is sufficient. self.names.append(filename) return None self.curfile = filename found = None if kind == '"': if env.MSVC_VERSION: for n in reversed(self.currentnode_stack): found = self.cached_find_resource(n, filename) if found: break else: found = self.cached_find_resource(self.currentnode_stack[-1], filename) if not found: for n in self.nodepaths: found = self.cached_find_resource(n, filename) if found: break listed = self.listed if found and not found in self.ban_includes: if found not in listed: listed.add(found) self.nodes.append(found) self.addlines(found) else: if filename not in listed: listed.add(filename) self.names.append(filename) return found def filter_comments(self, node): """ Filter the comments from a c/h file, and return the preprocessor lines. The regexps :py:attr:`waflib.Tools.c_preproc.re_cpp`, :py:attr:`waflib.Tools.c_preproc.re_nl` and :py:attr:`waflib.Tools.c_preproc.re_lines` are used internally. 
:return: the preprocessor directives as a list of (keyword, line) :rtype: a list of string pairs """ # return a list of tuples : keyword, line code = node.read() if use_trigraphs: for (a, b) in trig_def: code = code.split(a).join(b) code = re_nl.sub('', code) code = re_cpp.sub(repl, code) return re_lines.findall(code) def parse_lines(self, node): try: cache = node.ctx.preproc_cache_lines except AttributeError: cache = node.ctx.preproc_cache_lines = Utils.lru_cache(LINE_CACHE_SIZE) try: return cache[node] except KeyError: cache[node] = lines = self.filter_comments(node) lines.append((POPFILE, '')) lines.reverse() return lines def addlines(self, node): """ Add the lines from a header in the list of preprocessor lines to parse :param node: header :type node: :py:class:`waflib.Node.Node` """ self.currentnode_stack.append(node.parent) self.count_files += 1 if self.count_files > recursion_limit: # issue #812 raise PreprocError('recursion limit exceeded') if Logs.verbose: Logs.debug('preproc: reading file %r', node) try: lines = self.parse_lines(node) except EnvironmentError: raise PreprocError('could not read the file %r' % node) except Exception: if Logs.verbose > 0: Logs.error('parsing %r failed %s', node, traceback.format_exc()) else: self.lines.extend(lines) def start(self, node, env): """ Preprocess a source file to obtain the dependencies, which are accumulated to :py:attr:`waflib.Tools.c_preproc.c_parser.nodes` and :py:attr:`waflib.Tools.c_preproc.c_parser.names`. :param node: source file :type node: :py:class:`waflib.Node.Node` :param env: config set containing additional defines to take into account :type env: :py:class:`waflib.ConfigSet.ConfigSet` """ Logs.debug('preproc: scanning %s (in %s)', node.name, node.parent.name) self.current_file = node self.addlines(node) # macros may be defined on the command-line, so they must be parsed as if they were part of the file if env.DEFINES: lst = format_defines(env.DEFINES) lst.reverse() self.lines.extend([('define', x) for x in lst]) while self.lines: (token, line) = self.lines.pop() if token == POPFILE: self.count_files -= 1 self.currentnode_stack.pop() continue try: state = self.state # make certain we define the state if we are about to enter in an if block if token[:2] == 'if': state.append(undefined) elif token == 'endif': state.pop() # skip lines when in a dead 'if' branch, wait for the endif if token[0] != 'e': if skipped in self.state or ignored in self.state: continue if token == 'if': ret = eval_macro(tokenize(line), self.defs) if ret: state[-1] = accepted else: state[-1] = ignored elif token == 'ifdef': m = re_mac.match(line) if m and m.group() in self.defs: state[-1] = accepted else: state[-1] = ignored elif token == 'ifndef': m = re_mac.match(line) if m and m.group() in self.defs: state[-1] = ignored else: state[-1] = accepted elif token == 'include' or token == 'import': (kind, inc) = extract_include(line, self.defs) self.current_file = self.tryfind(inc, kind, env) if token == 'import': self.ban_includes.add(self.current_file) elif token == 'elif': if state[-1] == accepted: state[-1] = skipped elif state[-1] == ignored: if eval_macro(tokenize(line), self.defs): state[-1] = accepted elif token == 'else': if state[-1] == accepted: state[-1] = skipped elif state[-1] == ignored: state[-1] = accepted elif token == 'define': try: self.defs[self.define_name(line)] = line except AttributeError: raise PreprocError('Invalid define line %r' % line) elif token == 'undef': m = re_mac.match(line) if m and m.group() in self.defs: 
self.defs.__delitem__(m.group()) #print "undef %s" % name elif token == 'pragma': if re_pragma_once.match(line.lower()): self.ban_includes.add(self.current_file) except Exception as e: if Logs.verbose: Logs.debug('preproc: line parsing failed (%s): %s %s', e, line, traceback.format_exc()) def define_name(self, line): """ :param line: define line :type line: string :rtype: string :return: the define name """ return re_mac.match(line).group() def scan(task): """ Get the dependencies using a c/c++ preprocessor, this is required for finding dependencies of the kind:: #include some_macro() This function is bound as a task method on :py:class:`waflib.Tools.c.c` and :py:class:`waflib.Tools.cxx.cxx` for example """ try: incn = task.generator.includes_nodes except AttributeError: raise Errors.WafError('%r is missing a feature such as "c", "cxx" or "includes": ' % task.generator) if go_absolute: nodepaths = incn + [task.generator.bld.root.find_dir(x) for x in standard_includes] else: nodepaths = [x for x in incn if x.is_child_of(x.ctx.srcnode) or x.is_child_of(x.ctx.bldnode)] tmp = c_parser(nodepaths) tmp.start(task.inputs[0], task.env) return (tmp.nodes, tmp.names) tdb-1.4.2/third_party/waf/waflib/Tools/c_tests.py0000660000000000000000000001354613444661622021734 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2016-2018 (ita) """ Various configuration tests. """ from waflib import Task from waflib.Configure import conf from waflib.TaskGen import feature, before_method, after_method LIB_CODE = ''' #ifdef _MSC_VER #define testEXPORT __declspec(dllexport) #else #define testEXPORT #endif testEXPORT int lib_func(void) { return 9; } ''' MAIN_CODE = ''' #ifdef _MSC_VER #define testEXPORT __declspec(dllimport) #else #define testEXPORT #endif testEXPORT int lib_func(void); int main(int argc, char **argv) { (void)argc; (void)argv; return !(lib_func() == 9); } ''' @feature('link_lib_test') @before_method('process_source') def link_lib_test_fun(self): """ The configuration test :py:func:`waflib.Configure.run_build` declares a unique task generator, so we need to create other task generators from here to check if the linker is able to link libraries. """ def write_test_file(task): task.outputs[0].write(task.generator.code) rpath = [] if getattr(self, 'add_rpath', False): rpath = [self.bld.path.get_bld().abspath()] mode = self.mode m = '%s %s' % (mode, mode) ex = self.test_exec and 'test_exec' or '' bld = self.bld bld(rule=write_test_file, target='test.' + mode, code=LIB_CODE) bld(rule=write_test_file, target='main.' + mode, code=MAIN_CODE) bld(features='%sshlib' % m, source='test.' + mode, target='test') bld(features='%sprogram %s' % (m, ex), source='main.' + mode, target='app', use='test', rpath=rpath) @conf def check_library(self, mode=None, test_exec=True): """ Checks if libraries can be linked with the current linker. Uses :py:func:`waflib.Tools.c_tests.link_lib_test_fun`. :param mode: c or cxx or d :type mode: string """ if not mode: mode = 'c' if self.env.CXX: mode = 'cxx' self.check( compile_filename = [], features = 'link_lib_test', msg = 'Checking for libraries', mode = mode, test_exec = test_exec) ######################################################################################## INLINE_CODE = ''' typedef int foo_t; static %s foo_t static_foo () {return 0; } %s foo_t foo () { return 0; } ''' INLINE_VALUES = ['inline', '__inline__', '__inline'] @conf def check_inline(self, **kw): """ Checks for the right value for inline macro. 
Define INLINE_MACRO to 1 if the define is found. If the inline macro is not 'inline', add a define to the ``config.h`` (#define inline __inline__) :param define_name: define INLINE_MACRO by default to 1 if the macro is defined :type define_name: string :param features: by default *c* or *cxx* depending on the compiler present :type features: list of string """ self.start_msg('Checking for inline') if not 'define_name' in kw: kw['define_name'] = 'INLINE_MACRO' if not 'features' in kw: if self.env.CXX: kw['features'] = ['cxx'] else: kw['features'] = ['c'] for x in INLINE_VALUES: kw['fragment'] = INLINE_CODE % (x, x) try: self.check(**kw) except self.errors.ConfigurationError: continue else: self.end_msg(x) if x != 'inline': self.define('inline', x, quote=False) return x self.fatal('could not use inline functions') ######################################################################################## LARGE_FRAGMENT = '''#include int main(int argc, char **argv) { (void)argc; (void)argv; return !(sizeof(off_t) >= 8); } ''' @conf def check_large_file(self, **kw): """ Checks for large file support and define the macro HAVE_LARGEFILE The test is skipped on win32 systems (DEST_BINFMT == pe). :param define_name: define to set, by default *HAVE_LARGEFILE* :type define_name: string :param execute: execute the test (yes by default) :type execute: bool """ if not 'define_name' in kw: kw['define_name'] = 'HAVE_LARGEFILE' if not 'execute' in kw: kw['execute'] = True if not 'features' in kw: if self.env.CXX: kw['features'] = ['cxx', 'cxxprogram'] else: kw['features'] = ['c', 'cprogram'] kw['fragment'] = LARGE_FRAGMENT kw['msg'] = 'Checking for large file support' ret = True try: if self.env.DEST_BINFMT != 'pe': ret = self.check(**kw) except self.errors.ConfigurationError: pass else: if ret: return True kw['msg'] = 'Checking for -D_FILE_OFFSET_BITS=64' kw['defines'] = ['_FILE_OFFSET_BITS=64'] try: ret = self.check(**kw) except self.errors.ConfigurationError: pass else: self.define('_FILE_OFFSET_BITS', 64) return ret self.fatal('There is no support for large files') ######################################################################################## ENDIAN_FRAGMENT = ''' short int ascii_mm[] = { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 }; short int ascii_ii[] = { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 }; int use_ascii (int i) { return ascii_mm[i] + ascii_ii[i]; } short int ebcdic_ii[] = { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 }; short int ebcdic_mm[] = { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 }; int use_ebcdic (int i) { return ebcdic_mm[i] + ebcdic_ii[i]; } extern int foo; ''' class grep_for_endianness(Task.Task): """ Task that reads a binary and tries to determine the endianness """ color = 'PINK' def run(self): txt = self.inputs[0].read(flags='rb').decode('latin-1') if txt.find('LiTTleEnDian') > -1: self.generator.tmp.append('little') elif txt.find('BIGenDianSyS') > -1: self.generator.tmp.append('big') else: return -1 @feature('grep_for_endianness') @after_method('process_source') def grep_for_endianness_fun(self): """ Used by the endianness configuration test """ self.create_task('grep_for_endianness', self.compiled_tasks[0].outputs[0]) @conf def check_endianness(self): """ Executes a configuration test to determine the endianness """ tmp = [] def check_msg(self): return tmp[0] self.check(fragment=ENDIAN_FRAGMENT, features='c grep_for_endianness', msg='Checking for endianness', define='ENDIANNESS', tmp=tmp, okmsg=check_msg) return tmp[0] 
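A hypothetical wscript driving the configuration tests defined in this file (all four helpers are the ``@conf`` methods shown above; the project layout is assumed)::

    def configure(conf):
        conf.load('compiler_c')
        conf.check_library()      # verify the linker can build and link a shlib
        conf.check_inline()       # defines INLINE_MACRO, plus 'inline' if needed
        conf.check_large_file()   # HAVE_LARGEFILE, or _FILE_OFFSET_BITS=64
        endian = conf.check_endianness()  # returns 'little' or 'big'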
tdb-1.4.2/third_party/waf/waflib/Tools/ccroot.py0000660000000000000000000006322413527011455021552 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ Classes and methods shared by tools providing support for C-like language such as C/C++/D/Assembly/Go (this support module is almost never used alone). """ import os, re from waflib import Task, Utils, Node, Errors, Logs from waflib.TaskGen import after_method, before_method, feature, taskgen_method, extension from waflib.Tools import c_aliases, c_preproc, c_config, c_osx, c_tests from waflib.Configure import conf SYSTEM_LIB_PATHS = ['/usr/lib64', '/usr/lib', '/usr/local/lib64', '/usr/local/lib'] USELIB_VARS = Utils.defaultdict(set) """ Mapping for features to :py:class:`waflib.ConfigSet.ConfigSet` variables. See :py:func:`waflib.Tools.ccroot.propagate_uselib_vars`. """ USELIB_VARS['c'] = set(['INCLUDES', 'FRAMEWORKPATH', 'DEFINES', 'CPPFLAGS', 'CCDEPS', 'CFLAGS', 'ARCH']) USELIB_VARS['cxx'] = set(['INCLUDES', 'FRAMEWORKPATH', 'DEFINES', 'CPPFLAGS', 'CXXDEPS', 'CXXFLAGS', 'ARCH']) USELIB_VARS['d'] = set(['INCLUDES', 'DFLAGS']) USELIB_VARS['includes'] = set(['INCLUDES', 'FRAMEWORKPATH', 'ARCH']) USELIB_VARS['cprogram'] = USELIB_VARS['cxxprogram'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS', 'FRAMEWORK', 'FRAMEWORKPATH', 'ARCH', 'LDFLAGS']) USELIB_VARS['cshlib'] = USELIB_VARS['cxxshlib'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS', 'FRAMEWORK', 'FRAMEWORKPATH', 'ARCH', 'LDFLAGS']) USELIB_VARS['cstlib'] = USELIB_VARS['cxxstlib'] = set(['ARFLAGS', 'LINKDEPS']) USELIB_VARS['dprogram'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS']) USELIB_VARS['dshlib'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS']) USELIB_VARS['dstlib'] = set(['ARFLAGS', 'LINKDEPS']) USELIB_VARS['asm'] = set(['ASFLAGS']) # ================================================================================================= @taskgen_method def create_compiled_task(self, name, node): """ Create the compilation task: c, cxx, asm, etc. The output node is created automatically (object file with a typical **.o** extension). The task is appended to the list *compiled_tasks* which is then used by :py:func:`waflib.Tools.ccroot.apply_link` :param name: name of the task class :type name: string :param node: the file to compile :type node: :py:class:`waflib.Node.Node` :return: The task created :rtype: :py:class:`waflib.Task.Task` """ out = '%s.%d.o' % (node.name, self.idx) task = self.create_task(name, node, node.parent.find_or_declare(out)) try: self.compiled_tasks.append(task) except AttributeError: self.compiled_tasks = [task] return task @taskgen_method def to_incnodes(self, inlst): """ Task generator method provided to convert a list of string/nodes into a list of includes folders. The paths are assumed to be relative to the task generator path, except if they begin by **#** in which case they are searched from the top-level directory (``bld.srcnode``). The folders are simply assumed to be existing. The node objects in the list are returned in the output list. The strings are converted into node objects if possible. The node is searched from the source directory, and if a match is found, the equivalent build directory is created and added to the returned list too. When a folder cannot be found, it is ignored. 
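A hypothetical task generator exercising those lookup rules; plain paths are taken relative to the wscript directory, while a leading ``#`` anchors the path at the project top-level::

    def build(bld):
        bld.program(
            source='src/main.c',
            target='app',
            includes='. src #third_party/include')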
:param inlst: list of folders :type inlst: space-delimited string or a list of string/nodes :rtype: list of :py:class:`waflib.Node.Node` :return: list of include folders as nodes """ lst = [] seen = set() for x in self.to_list(inlst): if x in seen or not x: continue seen.add(x) # with a real lot of targets, it is sometimes interesting to cache the results below if isinstance(x, Node.Node): lst.append(x) else: if os.path.isabs(x): lst.append(self.bld.root.make_node(x) or x) else: if x[0] == '#': p = self.bld.bldnode.make_node(x[1:]) v = self.bld.srcnode.make_node(x[1:]) else: p = self.path.get_bld().make_node(x) v = self.path.make_node(x) if p.is_child_of(self.bld.bldnode): p.mkdir() lst.append(p) lst.append(v) return lst @feature('c', 'cxx', 'd', 'asm', 'fc', 'includes') @after_method('propagate_uselib_vars', 'process_source') def apply_incpaths(self): """ Task generator method that processes the attribute *includes*:: tg = bld(features='includes', includes='.') The folders only need to be relative to the current directory, the equivalent build directory is added automatically (for headers created in the build directory). This enables using a build directory or not (``top == out``). This method will add a list of nodes read by :py:func:`waflib.Tools.ccroot.to_incnodes` in ``tg.env.INCPATHS``, and the list of include paths in ``tg.env.INCLUDES``. """ lst = self.to_incnodes(self.to_list(getattr(self, 'includes', [])) + self.env.INCLUDES) self.includes_nodes = lst cwd = self.get_cwd() self.env.INCPATHS = [x.path_from(cwd) for x in lst] class link_task(Task.Task): """ Base class for all link tasks. A task generator is supposed to have at most one link task bound in the attribute *link_task*. See :py:func:`waflib.Tools.ccroot.apply_link`. .. inheritance-diagram:: waflib.Tools.ccroot.stlink_task waflib.Tools.c.cprogram waflib.Tools.c.cshlib waflib.Tools.cxx.cxxstlib waflib.Tools.cxx.cxxprogram waflib.Tools.cxx.cxxshlib waflib.Tools.d.dprogram waflib.Tools.d.dshlib waflib.Tools.d.dstlib waflib.Tools.ccroot.fake_shlib waflib.Tools.ccroot.fake_stlib waflib.Tools.asm.asmprogram waflib.Tools.asm.asmshlib waflib.Tools.asm.asmstlib """ color = 'YELLOW' weight = 3 """Try to process link tasks as early as possible""" inst_to = None """Default installation path for the link task outputs, or None to disable""" chmod = Utils.O755 """Default installation mode for the link task outputs""" def add_target(self, target): """ Process the *target* attribute to add the platform-specific prefix/suffix such as *.so* or *.exe*. The settings are retrieved from ``env.clsname_PATTERN`` """ if isinstance(target, str): base = self.generator.path if target.startswith('#'): # for those who like flat structures target = target[1:] base = self.generator.bld.bldnode pattern = self.env[self.__class__.__name__ + '_PATTERN'] if not pattern: pattern = '%s' folder, name = os.path.split(target) if self.__class__.__name__.find('shlib') > 0 and getattr(self.generator, 'vnum', None): nums = self.generator.vnum.split('.') if self.env.DEST_BINFMT == 'pe': # include the version in the dll file name, # the import lib file name stays unversioned. 
name = name + '-' + nums[0] elif self.env.DEST_OS == 'openbsd': pattern = '%s.%s' % (pattern, nums[0]) if len(nums) >= 2: pattern += '.%s' % nums[1] if folder: tmp = folder + os.sep + pattern % name else: tmp = pattern % name target = base.find_or_declare(tmp) self.set_outputs(target) def exec_command(self, *k, **kw): ret = super(link_task, self).exec_command(*k, **kw) if not ret and self.env.DO_MANIFEST: ret = self.exec_mf() return ret def exec_mf(self): """ Create manifest files for VS-like compilers (msvc, ifort, ...) """ if not self.env.MT: return 0 manifest = None for out_node in self.outputs: if out_node.name.endswith('.manifest'): manifest = out_node.abspath() break else: # Should never get here. If we do, it means the manifest file was # never added to the outputs list, thus we don't have a manifest file # to embed, so we just return. return 0 # embedding mode. Different for EXE's and DLL's. # see: http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx mode = '' for x in Utils.to_list(self.generator.features): if x in ('cprogram', 'cxxprogram', 'fcprogram', 'fcprogram_test'): mode = 1 elif x in ('cshlib', 'cxxshlib', 'fcshlib'): mode = 2 Logs.debug('msvc: embedding manifest in mode %r', mode) lst = [] + self.env.MT lst.extend(Utils.to_list(self.env.MTFLAGS)) lst.extend(['-manifest', manifest]) lst.append('-outputresource:%s;%s' % (self.outputs[0].abspath(), mode)) return super(link_task, self).exec_command(lst) class stlink_task(link_task): """ Base for static link tasks, which use *ar* most of the time. The target is always removed before being written. """ run_str = '${AR} ${ARFLAGS} ${AR_TGT_F}${TGT} ${AR_SRC_F}${SRC}' chmod = Utils.O644 """Default installation mode for the static libraries""" def rm_tgt(cls): old = cls.run def wrap(self): try: os.remove(self.outputs[0].abspath()) except OSError: pass return old(self) setattr(cls, 'run', wrap) rm_tgt(stlink_task) @feature('skip_stlib_link_deps') @before_method('process_use') def apply_skip_stlib_link_deps(self): """ This enables an optimization in the :py:func:wafilb.Tools.ccroot.processes_use: method that skips dependency and link flag optimizations for targets that generate static libraries (via the :py:class:Tools.ccroot.stlink_task task). The actual behavior is implemented in :py:func:wafilb.Tools.ccroot.processes_use: method so this feature only tells waf to enable the new behavior. """ self.env.SKIP_STLIB_LINK_DEPS = True @feature('c', 'cxx', 'd', 'fc', 'asm') @after_method('process_source') def apply_link(self): """ Collect the tasks stored in ``compiled_tasks`` (created by :py:func:`waflib.Tools.ccroot.create_compiled_task`), and use the outputs for a new instance of :py:class:`waflib.Tools.ccroot.link_task`. 
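The ``rm_tgt`` helper is a plain monkey-patch: it swaps the class's ``run`` for a wrapper that first unlinks the stale archive (``ar`` would otherwise append to it). The same pattern in a standalone sketch (the ``Demo`` class is invented)::

    import os

    class Demo(object):
        def run(self):
            print('writing %s' % self.output)

    def rm_tgt(cls):
        old = cls.run
        def wrap(self):
            try:
                os.remove(self.output)  # drop the stale target first
            except OSError:
                pass
            return old(self)
        setattr(cls, 'run', wrap)
    rm_tgt(Demo)

    t = Demo()
    t.output = '/tmp/libdemo.a'
    t.run()   # removes any stale /tmp/libdemo.a, then "writes" it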
The class to use is the first link task matching a name from the attribute *features*, for example:: def build(bld): tg = bld(features='cxx cxxprogram cprogram', source='main.c', target='app') will create the task ``tg.link_task`` as a new instance of :py:class:`waflib.Tools.cxx.cxxprogram` """ for x in self.features: if x == 'cprogram' and 'cxx' in self.features: # limited compat x = 'cxxprogram' elif x == 'cshlib' and 'cxx' in self.features: x = 'cxxshlib' if x in Task.classes: if issubclass(Task.classes[x], link_task): link = x break else: return objs = [t.outputs[0] for t in getattr(self, 'compiled_tasks', [])] self.link_task = self.create_task(link, objs) self.link_task.add_target(self.target) # remember that the install paths are given by the task generators try: inst_to = self.install_path except AttributeError: inst_to = self.link_task.inst_to if inst_to: # install a copy of the node list we have at this moment (implib not added) self.install_task = self.add_install_files( install_to=inst_to, install_from=self.link_task.outputs[:], chmod=self.link_task.chmod, task=self.link_task) @taskgen_method def use_rec(self, name, **kw): """ Processes the ``use`` keyword recursively. This method is kind of private and only meant to be used from ``process_use`` """ if name in self.tmp_use_not or name in self.tmp_use_seen: return try: y = self.bld.get_tgen_by_name(name) except Errors.WafError: self.uselib.append(name) self.tmp_use_not.add(name) return self.tmp_use_seen.append(name) y.post() # bind temporary attributes on the task generator y.tmp_use_objects = objects = kw.get('objects', True) y.tmp_use_stlib = stlib = kw.get('stlib', True) try: link_task = y.link_task except AttributeError: y.tmp_use_var = '' else: objects = False if not isinstance(link_task, stlink_task): stlib = False y.tmp_use_var = 'LIB' else: y.tmp_use_var = 'STLIB' p = self.tmp_use_prec for x in self.to_list(getattr(y, 'use', [])): if self.env["STLIB_" + x]: continue try: p[x].append(name) except KeyError: p[x] = [name] self.use_rec(x, objects=objects, stlib=stlib) @feature('c', 'cxx', 'd', 'use', 'fc') @before_method('apply_incpaths', 'propagate_uselib_vars') @after_method('apply_link', 'process_source') def process_use(self): """ Process the ``use`` attribute which contains a list of task generator names:: def build(bld): bld.shlib(source='a.c', target='lib1') bld.program(source='main.c', target='app', use='lib1') See :py:func:`waflib.Tools.ccroot.use_rec`. 
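A hypothetical build script exercising the propagation that ``process_use`` performs; ``export_includes`` and ``export_defines`` flow from ``lib1`` into every consumer automatically::

    def build(bld):
        bld.shlib(
            source='a.c',
            target='lib1',
            export_includes='include',
            export_defines=['LIB1_API=1'])
        bld.program(source='main.c', target='app', use='lib1')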
""" use_not = self.tmp_use_not = set() self.tmp_use_seen = [] # we would like an ordered set use_prec = self.tmp_use_prec = {} self.uselib = self.to_list(getattr(self, 'uselib', [])) self.includes = self.to_list(getattr(self, 'includes', [])) names = self.to_list(getattr(self, 'use', [])) for x in names: self.use_rec(x) for x in use_not: if x in use_prec: del use_prec[x] # topological sort out = self.tmp_use_sorted = [] tmp = [] for x in self.tmp_use_seen: for k in use_prec.values(): if x in k: break else: tmp.append(x) while tmp: e = tmp.pop() out.append(e) try: nlst = use_prec[e] except KeyError: pass else: del use_prec[e] for x in nlst: for y in use_prec: if x in use_prec[y]: break else: tmp.append(x) if use_prec: raise Errors.WafError('Cycle detected in the use processing %r' % use_prec) out.reverse() link_task = getattr(self, 'link_task', None) for x in out: y = self.bld.get_tgen_by_name(x) var = y.tmp_use_var if var and link_task: if self.env.SKIP_STLIB_LINK_DEPS and isinstance(link_task, stlink_task): # If the skip_stlib_link_deps feature is enabled then we should # avoid adding lib deps to the stlink_task instance. pass elif var == 'LIB' or y.tmp_use_stlib or x in names: self.env.append_value(var, [y.target[y.target.rfind(os.sep) + 1:]]) self.link_task.dep_nodes.extend(y.link_task.outputs) tmp_path = y.link_task.outputs[0].parent.path_from(self.get_cwd()) self.env.append_unique(var + 'PATH', [tmp_path]) else: if y.tmp_use_objects: self.add_objects_from_tgen(y) if getattr(y, 'export_includes', None): # self.includes may come from a global variable #2035 self.includes = self.includes + y.to_incnodes(y.export_includes) if getattr(y, 'export_defines', None): self.env.append_value('DEFINES', self.to_list(y.export_defines)) # and finally, add the use variables (no recursion needed) for x in names: try: y = self.bld.get_tgen_by_name(x) except Errors.WafError: if not self.env['STLIB_' + x] and not x in self.uselib: self.uselib.append(x) else: for k in self.to_list(getattr(y, 'use', [])): if not self.env['STLIB_' + k] and not k in self.uselib: self.uselib.append(k) @taskgen_method def accept_node_to_link(self, node): """ PRIVATE INTERNAL USE ONLY """ return not node.name.endswith('.pdb') @taskgen_method def add_objects_from_tgen(self, tg): """ Add the objects from the depending compiled tasks as link task inputs. Some objects are filtered: for instance, .pdb files are added to the compiled tasks but not to the link tasks (to avoid errors) PRIVATE INTERNAL USE ONLY """ try: link_task = self.link_task except AttributeError: pass else: for tsk in getattr(tg, 'compiled_tasks', []): for x in tsk.outputs: if self.accept_node_to_link(x): link_task.inputs.append(x) @taskgen_method def get_uselib_vars(self): """ :return: the *uselib* variables associated to the *features* attribute (see :py:attr:`waflib.Tools.ccroot.USELIB_VARS`) :rtype: list of string """ _vars = set() for x in self.features: if x in USELIB_VARS: _vars |= USELIB_VARS[x] return _vars @feature('c', 'cxx', 'd', 'fc', 'javac', 'cs', 'uselib', 'asm') @after_method('process_use') def propagate_uselib_vars(self): """ Process uselib variables for adding flags. 
For example, the following target:: def build(bld): bld.env.AFLAGS_aaa = ['bar'] from waflib.Tools.ccroot import USELIB_VARS USELIB_VARS['aaa'] = ['AFLAGS'] tg = bld(features='aaa', aflags='test') The *aflags* attribute will be processed and this method will set:: tg.env.AFLAGS = ['bar', 'test'] """ _vars = self.get_uselib_vars() env = self.env app = env.append_value feature_uselib = self.features + self.to_list(getattr(self, 'uselib', [])) for var in _vars: y = var.lower() val = getattr(self, y, []) if val: app(var, self.to_list(val)) for x in feature_uselib: val = env['%s_%s' % (var, x)] if val: app(var, val) # ============ the code above must not know anything about import libs ========== @feature('cshlib', 'cxxshlib', 'fcshlib') @after_method('apply_link') def apply_implib(self): """ Handle dlls and their import libs on Windows-like systems. A ``.dll.a`` file called *import library* is generated. It must be installed as it is required for linking the library. """ if not self.env.DEST_BINFMT == 'pe': return dll = self.link_task.outputs[0] if isinstance(self.target, Node.Node): name = self.target.name else: name = os.path.split(self.target)[1] implib = self.env.implib_PATTERN % name implib = dll.parent.find_or_declare(implib) self.env.append_value('LINKFLAGS', self.env.IMPLIB_ST % implib.bldpath()) self.link_task.outputs.append(implib) if getattr(self, 'defs', None) and self.env.DEST_BINFMT == 'pe': node = self.path.find_resource(self.defs) if not node: raise Errors.WafError('invalid def file %r' % self.defs) if self.env.def_PATTERN: self.env.append_value('LINKFLAGS', self.env.def_PATTERN % node.path_from(self.get_cwd())) self.link_task.dep_nodes.append(node) else: # gcc for windows takes *.def file as input without any special flag self.link_task.inputs.append(node) # where to put the import library if getattr(self, 'install_task', None): try: # user has given a specific installation path for the import library inst_to = self.install_path_implib except AttributeError: try: # user has given an installation path for the main library, put the import library in it inst_to = self.install_path except AttributeError: # else, put the library in BINDIR and the import library in LIBDIR inst_to = '${IMPLIBDIR}' self.install_task.install_to = '${BINDIR}' if not self.env.IMPLIBDIR: self.env.IMPLIBDIR = self.env.LIBDIR self.implib_install_task = self.add_install_files(install_to=inst_to, install_from=implib, chmod=self.link_task.chmod, task=self.link_task) # ============ the code above must not know anything about vnum processing on unix platforms ========= re_vnum = re.compile('^([1-9]\\d*|0)([.]([1-9]\\d*|0)){0,2}?$') @feature('cshlib', 'cxxshlib', 'dshlib', 'fcshlib', 'vnum') @after_method('apply_link', 'propagate_uselib_vars') def apply_vnum(self): """ Enforce version numbering on shared libraries. The valid version numbers must have either zero or two dots:: def build(bld): bld.shlib(source='a.c', target='foo', vnum='14.15.16') In this example on Linux platform, ``libfoo.so`` is installed as ``libfoo.so.14.15.16``, and the following symbolic links are created: * ``libfoo.so → libfoo.so.14.15.16`` * ``libfoo.so.14 → libfoo.so.14.15.16`` By default, the library will be assigned SONAME ``libfoo.so.14``, effectively declaring ABI compatibility between all minor and patch releases for the major version of the library. 
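A quick standalone check of what the ``re_vnum`` validator above accepts (up to three numeric components, no leading zeros)::

    import re
    re_vnum = re.compile('^([1-9]\\d*|0)([.]([1-9]\\d*|0)){0,2}?$')

    for v in ('14', '14.15', '14.15.16', '01.2.3', '1.2.3.4'):
        print(v, bool(re_vnum.match(v)))
    # 14 True / 14.15 True / 14.15.16 True / 01.2.3 False / 1.2.3.4 False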
When necessary, the compatibility can be explicitly defined using `cnum` parameter: def build(bld): bld.shlib(source='a.c', target='foo', vnum='14.15.16', cnum='14.15') In this case, the assigned SONAME will be ``libfoo.so.14.15`` with ABI compatibility only between path releases for a specific major and minor version of the library. On OS X platform, install-name parameter will follow the above logic for SONAME with exception that it also specifies an absolute path (based on install_path) of the library. """ if not getattr(self, 'vnum', '') or os.name != 'posix' or self.env.DEST_BINFMT not in ('elf', 'mac-o'): return link = self.link_task if not re_vnum.match(self.vnum): raise Errors.WafError('Invalid vnum %r for target %r' % (self.vnum, getattr(self, 'name', self))) nums = self.vnum.split('.') node = link.outputs[0] cnum = getattr(self, 'cnum', str(nums[0])) cnums = cnum.split('.') if len(cnums)>len(nums) or nums[0:len(cnums)] != cnums: raise Errors.WafError('invalid compatibility version %s' % cnum) libname = node.name if libname.endswith('.dylib'): name3 = libname.replace('.dylib', '.%s.dylib' % self.vnum) name2 = libname.replace('.dylib', '.%s.dylib' % cnum) else: name3 = libname + '.' + self.vnum name2 = libname + '.' + cnum # add the so name for the ld linker - to disable, just unset env.SONAME_ST if self.env.SONAME_ST: v = self.env.SONAME_ST % name2 self.env.append_value('LINKFLAGS', v.split()) # the following task is just to enable execution from the build dir :-/ if self.env.DEST_OS != 'openbsd': outs = [node.parent.make_node(name3)] if name2 != name3: outs.append(node.parent.make_node(name2)) self.create_task('vnum', node, outs) if getattr(self, 'install_task', None): self.install_task.hasrun = Task.SKIPPED self.install_task.no_errcheck_out = True path = self.install_task.install_to if self.env.DEST_OS == 'openbsd': libname = self.link_task.outputs[0].name t1 = self.add_install_as(install_to='%s/%s' % (path, libname), install_from=node, chmod=self.link_task.chmod) self.vnum_install_task = (t1,) else: t1 = self.add_install_as(install_to=path + os.sep + name3, install_from=node, chmod=self.link_task.chmod) t3 = self.add_symlink_as(install_to=path + os.sep + libname, install_from=name3) if name2 != name3: t2 = self.add_symlink_as(install_to=path + os.sep + name2, install_from=name3) self.vnum_install_task = (t1, t2, t3) else: self.vnum_install_task = (t1, t3) if '-dynamiclib' in self.env.LINKFLAGS: # this requires after(propagate_uselib_vars) try: inst_to = self.install_path except AttributeError: inst_to = self.link_task.inst_to if inst_to: p = Utils.subst_vars(inst_to, self.env) path = os.path.join(p, name2) self.env.append_value('LINKFLAGS', ['-install_name', path]) self.env.append_value('LINKFLAGS', '-Wl,-compatibility_version,%s' % cnum) self.env.append_value('LINKFLAGS', '-Wl,-current_version,%s' % self.vnum) class vnum(Task.Task): """ Create the symbolic links for a versioned shared library. 
Instances are created by :py:func:`waflib.Tools.ccroot.apply_vnum` """ color = 'CYAN' ext_in = ['.bin'] def keyword(self): return 'Symlinking' def run(self): for x in self.outputs: path = x.abspath() try: os.remove(path) except OSError: pass try: os.symlink(self.inputs[0].name, path) except OSError: return 1 class fake_shlib(link_task): """ Task used for reading a system library and adding the dependency on it """ def runnable_status(self): for t in self.run_after: if not t.hasrun: return Task.ASK_LATER return Task.SKIP_ME class fake_stlib(stlink_task): """ Task used for reading a system library and adding the dependency on it """ def runnable_status(self): for t in self.run_after: if not t.hasrun: return Task.ASK_LATER return Task.SKIP_ME @conf def read_shlib(self, name, paths=[], export_includes=[], export_defines=[]): """ Read a system shared library, enabling its use as a local library. Will trigger a rebuild if the file changes:: def build(bld): bld.read_shlib('m') bld.program(source='main.c', use='m') """ return self(name=name, features='fake_lib', lib_paths=paths, lib_type='shlib', export_includes=export_includes, export_defines=export_defines) @conf def read_stlib(self, name, paths=[], export_includes=[], export_defines=[]): """ Read a system static library, enabling a use as a local library. Will trigger a rebuild if the file changes. """ return self(name=name, features='fake_lib', lib_paths=paths, lib_type='stlib', export_includes=export_includes, export_defines=export_defines) lib_patterns = { 'shlib' : ['lib%s.so', '%s.so', 'lib%s.dylib', 'lib%s.dll', '%s.dll'], 'stlib' : ['lib%s.a', '%s.a', 'lib%s.dll', '%s.dll', 'lib%s.lib', '%s.lib'], } @feature('fake_lib') def process_lib(self): """ Find the location of a foreign library. Used by :py:class:`waflib.Tools.ccroot.read_shlib` and :py:class:`waflib.Tools.ccroot.read_stlib`. """ node = None names = [x % self.name for x in lib_patterns[self.lib_type]] for x in self.lib_paths + [self.path] + SYSTEM_LIB_PATHS: if not isinstance(x, Node.Node): x = self.bld.root.find_node(x) or self.path.find_node(x) if not x: continue for y in names: node = x.find_node(y) if node: try: Utils.h_file(node.abspath()) except EnvironmentError: raise ValueError('Could not read %r' % y) break else: continue break else: raise Errors.WafError('could not find library %r' % self.name) self.link_task = self.create_task('fake_%s' % self.lib_type, [], [node]) self.target = self.name class fake_o(Task.Task): def runnable_status(self): return Task.SKIP_ME @extension('.o', '.obj') def add_those_o_files(self, node): tsk = self.create_task('fake_o', [], node) try: self.compiled_tasks.append(tsk) except AttributeError: self.compiled_tasks = [tsk] @feature('fake_obj') @before_method('process_source') def process_objs(self): """ Puts object files in the task generator outputs """ for node in self.to_nodes(self.source): self.add_those_o_files(node) self.source = [] @conf def read_object(self, obj): """ Read an object file, enabling injection in libs/programs. Will trigger a rebuild if the file changes. 
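A hypothetical build script combining the readers above; the library and object paths are made up for the example::

    def build(bld):
        bld.read_stlib('z', paths=['/opt/zlib/lib'],
                       export_includes=['/opt/zlib/include'])
        bld.read_object('prebuilt/helpers.o')
        bld.program(source='main.c', target='app', use='z helpers.o')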
:param obj: object file path, as string or Node """ if not isinstance(obj, self.path.__class__): obj = self.path.find_resource(obj) return self(features='fake_obj', source=obj, name=obj.name) @feature('cxxprogram', 'cprogram') @after_method('apply_link', 'process_use') def set_full_paths_hpux(self): """ On hp-ux, extend the libpaths and static library paths to absolute paths """ if self.env.DEST_OS != 'hp-ux': return base = self.bld.bldnode.abspath() for var in ['LIBPATH', 'STLIBPATH']: lst = [] for x in self.env[var]: if x.startswith('/'): lst.append(x) else: lst.append(os.path.normpath(os.path.join(base, x))) self.env[var] = lst tdb-1.4.2/third_party/waf/waflib/Tools/clang.py0000660000000000000000000000115713444661622021347 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Krzysztof KosiÅ„ski 2014 """ Detect the Clang C compiler """ from waflib.Tools import ccroot, ar, gcc from waflib.Configure import conf @conf def find_clang(conf): """ Finds the program clang and executes it to ensure it really is clang """ cc = conf.find_program('clang', var='CC') conf.get_cc_version(cc, clang=True) conf.env.CC_NAME = 'clang' def configure(conf): conf.find_clang() conf.find_program(['llvm-ar', 'ar'], var='AR') conf.find_ar() conf.gcc_common_flags() conf.gcc_modifier_platform() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() tdb-1.4.2/third_party/waf/waflib/Tools/clangxx.py0000660000000000000000000000121013444661622021715 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy 2009-2018 (ita) """ Detect the Clang++ C++ compiler """ from waflib.Tools import ccroot, ar, gxx from waflib.Configure import conf @conf def find_clangxx(conf): """ Finds the program clang++, and executes it to ensure it really is clang++ """ cxx = conf.find_program('clang++', var='CXX') conf.get_cc_version(cxx, clang=True) conf.env.CXX_NAME = 'clang' def configure(conf): conf.find_clangxx() conf.find_program(['llvm-ar', 'ar'], var='AR') conf.find_ar() conf.gxx_common_flags() conf.gxx_modifier_platform() conf.cxx_load_tools() conf.cxx_add_flags() conf.link_add_flags() tdb-1.4.2/third_party/waf/waflib/Tools/compiler_c.py0000660000000000000000000000611513444661622022376 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Matthias Jahn jahn dôt matthias ât freenet dôt de, 2007 (pmarat) """ Try to detect a C compiler from the list of supported compilers (gcc, msvc, etc):: def options(opt): opt.load('compiler_c') def configure(cnf): cnf.load('compiler_c') def build(bld): bld.program(source='main.c', target='app') The compilers are associated to platforms in :py:attr:`waflib.Tools.compiler_c.c_compiler`. To register a new C compiler named *cfoo* (assuming the tool ``waflib/extras/cfoo.py`` exists), use:: from waflib.Tools.compiler_c import c_compiler c_compiler['win32'] = ['cfoo', 'msvc', 'gcc'] def options(opt): opt.load('compiler_c') def configure(cnf): cnf.load('compiler_c') def build(bld): bld.program(source='main.c', target='app') Not all compilers need to have a specific tool. 
For example, the clang compilers can be detected by the gcc tools when using:: $ CC=clang waf configure """ import re from waflib.Tools import ccroot from waflib import Utils from waflib.Logs import debug c_compiler = { 'win32': ['msvc', 'gcc', 'clang'], 'cygwin': ['gcc'], 'darwin': ['clang', 'gcc'], 'aix': ['xlc', 'gcc', 'clang'], 'linux': ['gcc', 'clang', 'icc'], 'sunos': ['suncc', 'gcc'], 'irix': ['gcc', 'irixcc'], 'hpux': ['gcc'], 'osf1V': ['gcc'], 'gnu': ['gcc', 'clang'], 'java': ['gcc', 'msvc', 'clang', 'icc'], 'default':['clang', 'gcc'], } """ Dict mapping platform names to Waf tools finding specific C compilers:: from waflib.Tools.compiler_c import c_compiler c_compiler['linux'] = ['gcc', 'icc', 'suncc'] """ def default_compilers(): build_platform = Utils.unversioned_sys_platform() possible_compiler_list = c_compiler.get(build_platform, c_compiler['default']) return ' '.join(possible_compiler_list) def configure(conf): """ Detects a suitable C compiler :raises: :py:class:`waflib.Errors.ConfigurationError` when no suitable compiler is found """ try: test_for_compiler = conf.options.check_c_compiler or default_compilers() except AttributeError: conf.fatal("Add options(opt): opt.load('compiler_c')") for compiler in re.split('[ ,]+', test_for_compiler): conf.env.stash() conf.start_msg('Checking for %r (C compiler)' % compiler) try: conf.load(compiler) except conf.errors.ConfigurationError as e: conf.env.revert() conf.end_msg(False) debug('compiler_c: %r', e) else: if conf.env.CC: conf.end_msg(conf.env.get_flat('CC')) conf.env.COMPILER_CC = compiler conf.env.commit() break conf.env.revert() conf.end_msg(False) else: conf.fatal('could not configure a C compiler!') def options(opt): """ This is how to provide compiler preferences on the command-line:: $ waf configure --check-c-compiler=gcc """ test_for_compiler = default_compilers() opt.load_special_tools('c_*.py', ban=['c_dumbpreproc.py']) cc_compiler_opts = opt.add_option_group('Configuration options') cc_compiler_opts.add_option('--check-c-compiler', default=None, help='list of C compilers to try [%s]' % test_for_compiler, dest="check_c_compiler") for x in test_for_compiler.split(): opt.load('%s' % x) tdb-1.4.2/third_party/waf/waflib/Tools/compiler_cxx.py0000660000000000000000000000620413444661622022755 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Matthias Jahn jahn dôt matthias ât freenet dôt de 2007 (pmarat) """ Try to detect a C++ compiler from the list of supported compilers (g++, msvc, etc):: def options(opt): opt.load('compiler_cxx') def configure(cnf): cnf.load('compiler_cxx') def build(bld): bld.program(source='main.cpp', target='app') The compilers are associated to platforms in :py:attr:`waflib.Tools.compiler_cxx.cxx_compiler`. To register a new C++ compiler named *cfoo* (assuming the tool ``waflib/extras/cfoo.py`` exists), use:: from waflib.Tools.compiler_cxx import cxx_compiler cxx_compiler['win32'] = ['cfoo', 'msvc', 'gcc'] def options(opt): opt.load('compiler_cxx') def configure(cnf): cnf.load('compiler_cxx') def build(bld): bld.program(source='main.c', target='app') Not all compilers need to have a specific tool. 
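The detection loop in ``configure`` relies on ``conf.env`` snapshots so that a failed probe leaves no residue behind. A minimal standalone sketch of the stash/revert/commit pattern (the ``Env`` class here is an illustrative stand-in, not waf's ConfigSet)::

    class Env(dict):
        def stash(self):
            self._saved = dict(self)
        def revert(self):
            self.clear()
            self.update(self._saved)
        def commit(self):
            del self._saved

    env = Env()
    for compiler in ('cfoo', 'gcc'):
        env.stash()
        try:
            if compiler != 'gcc':        # pretend only gcc is installed
                raise RuntimeError('%s not found' % compiler)
            env['CC'] = '/usr/bin/gcc'
        except RuntimeError:
            env.revert()                 # wipe anything the probe set
        else:
            env.commit()                 # keep the working toolchain
            break
    print(env)                           # {'CC': '/usr/bin/gcc'}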
For example, the clang compilers can be detected by the gcc tools when using:: $ CXX=clang waf configure """ import re from waflib.Tools import ccroot from waflib import Utils from waflib.Logs import debug cxx_compiler = { 'win32': ['msvc', 'g++', 'clang++'], 'cygwin': ['g++'], 'darwin': ['clang++', 'g++'], 'aix': ['xlc++', 'g++', 'clang++'], 'linux': ['g++', 'clang++', 'icpc'], 'sunos': ['sunc++', 'g++'], 'irix': ['g++'], 'hpux': ['g++'], 'osf1V': ['g++'], 'gnu': ['g++', 'clang++'], 'java': ['g++', 'msvc', 'clang++', 'icpc'], 'default': ['clang++', 'g++'] } """ Dict mapping the platform names to Waf tools finding specific C++ compilers:: from waflib.Tools.compiler_cxx import cxx_compiler cxx_compiler['linux'] = ['gxx', 'icpc', 'suncxx'] """ def default_compilers(): build_platform = Utils.unversioned_sys_platform() possible_compiler_list = cxx_compiler.get(build_platform, cxx_compiler['default']) return ' '.join(possible_compiler_list) def configure(conf): """ Detects a suitable C++ compiler :raises: :py:class:`waflib.Errors.ConfigurationError` when no suitable compiler is found """ try: test_for_compiler = conf.options.check_cxx_compiler or default_compilers() except AttributeError: conf.fatal("Add options(opt): opt.load('compiler_cxx')") for compiler in re.split('[ ,]+', test_for_compiler): conf.env.stash() conf.start_msg('Checking for %r (C++ compiler)' % compiler) try: conf.load(compiler) except conf.errors.ConfigurationError as e: conf.env.revert() conf.end_msg(False) debug('compiler_cxx: %r', e) else: if conf.env.CXX: conf.end_msg(conf.env.get_flat('CXX')) conf.env.COMPILER_CXX = compiler conf.env.commit() break conf.env.revert() conf.end_msg(False) else: conf.fatal('could not configure a C++ compiler!') def options(opt): """ This is how to provide compiler preferences on the command-line:: $ waf configure --check-cxx-compiler=gxx """ test_for_compiler = default_compilers() opt.load_special_tools('cxx_*.py') cxx_compiler_opts = opt.add_option_group('Configuration options') cxx_compiler_opts.add_option('--check-cxx-compiler', default=None, help='list of C++ compilers to try [%s]' % test_for_compiler, dest="check_cxx_compiler") for x in test_for_compiler.split(): opt.load('%s' % x) tdb-1.4.2/third_party/waf/waflib/Tools/compiler_d.py0000660000000000000000000000433113444661622022375 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Carlos Rafael Giani, 2007 (dv) # Thomas Nagy, 2016-2018 (ita) """ Try to detect a D compiler from the list of supported compilers:: def options(opt): opt.load('compiler_d') def configure(cnf): cnf.load('compiler_d') def build(bld): bld.program(source='main.d', target='app') Only three D compilers are really present at the moment: * gdc * dmd, the ldc compiler having a very similar command-line interface * ldc2 """ import re from waflib import Utils, Logs d_compiler = { 'default' : ['gdc', 'dmd', 'ldc2'] } """ Dict mapping the platform names to lists of names of D compilers to try, in order of preference:: from waflib.Tools.compiler_d import d_compiler d_compiler['default'] = ['gdc', 'dmd', 'ldc2'] """ def default_compilers(): build_platform = Utils.unversioned_sys_platform() possible_compiler_list = d_compiler.get(build_platform, d_compiler['default']) return ' '.join(possible_compiler_list) def configure(conf): """ Detects a suitable D compiler :raises: :py:class:`waflib.Errors.ConfigurationError` when no suitable compiler is found """ try: test_for_compiler = conf.options.check_d_compiler or default_compilers() except AttributeError: 
conf.fatal("Add options(opt): opt.load('compiler_d')") for compiler in re.split('[ ,]+', test_for_compiler): conf.env.stash() conf.start_msg('Checking for %r (D compiler)' % compiler) try: conf.load(compiler) except conf.errors.ConfigurationError as e: conf.env.revert() conf.end_msg(False) Logs.debug('compiler_d: %r', e) else: if conf.env.D: conf.end_msg(conf.env.get_flat('D')) conf.env.COMPILER_D = compiler conf.env.commit() break conf.env.revert() conf.end_msg(False) else: conf.fatal('could not configure a D compiler!') def options(opt): """ This is how to provide compiler preferences on the command-line:: $ waf configure --check-d-compiler=dmd """ test_for_compiler = default_compilers() d_compiler_opts = opt.add_option_group('Configuration options') d_compiler_opts.add_option('--check-d-compiler', default=None, help='list of D compilers to try [%s]' % test_for_compiler, dest='check_d_compiler') for x in test_for_compiler.split(): opt.load('%s' % x) tdb-1.4.2/third_party/waf/waflib/Tools/compiler_fc.py0000660000000000000000000000416213444661622022544 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 import re from waflib import Utils, Logs from waflib.Tools import fc fc_compiler = { 'win32' : ['gfortran','ifort'], 'darwin' : ['gfortran', 'g95', 'ifort'], 'linux' : ['gfortran', 'g95', 'ifort'], 'java' : ['gfortran', 'g95', 'ifort'], 'default': ['gfortran'], 'aix' : ['gfortran'] } """ Dict mapping the platform names to lists of names of Fortran compilers to try, in order of preference:: from waflib.Tools.compiler_c import c_compiler c_compiler['linux'] = ['gfortran', 'g95', 'ifort'] """ def default_compilers(): build_platform = Utils.unversioned_sys_platform() possible_compiler_list = fc_compiler.get(build_platform, fc_compiler['default']) return ' '.join(possible_compiler_list) def configure(conf): """ Detects a suitable Fortran compiler :raises: :py:class:`waflib.Errors.ConfigurationError` when no suitable compiler is found """ try: test_for_compiler = conf.options.check_fortran_compiler or default_compilers() except AttributeError: conf.fatal("Add options(opt): opt.load('compiler_fc')") for compiler in re.split('[ ,]+', test_for_compiler): conf.env.stash() conf.start_msg('Checking for %r (Fortran compiler)' % compiler) try: conf.load(compiler) except conf.errors.ConfigurationError as e: conf.env.revert() conf.end_msg(False) Logs.debug('compiler_fortran: %r', e) else: if conf.env.FC: conf.end_msg(conf.env.get_flat('FC')) conf.env.COMPILER_FORTRAN = compiler conf.env.commit() break conf.env.revert() conf.end_msg(False) else: conf.fatal('could not configure a Fortran compiler!') def options(opt): """ This is how to provide compiler preferences on the command-line:: $ waf configure --check-fortran-compiler=ifort """ test_for_compiler = default_compilers() opt.load_special_tools('fc_*.py') fortran_compiler_opts = opt.add_option_group('Configuration options') fortran_compiler_opts.add_option('--check-fortran-compiler', default=None, help='list of Fortran compiler to try [%s]' % test_for_compiler, dest="check_fortran_compiler") for x in test_for_compiler.split(): opt.load('%s' % x) tdb-1.4.2/third_party/waf/waflib/Tools/cs.py0000660000000000000000000001437513444661622020676 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) """ C# support. 
A simple example:: def configure(conf): conf.load('cs') def build(bld): bld(features='cs', source='main.cs', gen='foo') Note that the configuration may compile C# snippets:: FRAG = ''' namespace Moo { public class Test { public static int Main(string[] args) { return 0; } } }''' def configure(conf): conf.check(features='cs', fragment=FRAG, compile_filename='test.cs', gen='test.exe', bintype='exe', csflags=['-pkg:gtk-sharp-2.0'], msg='Checking for Gtksharp support') """ from waflib import Utils, Task, Options, Errors from waflib.TaskGen import before_method, after_method, feature from waflib.Tools import ccroot from waflib.Configure import conf ccroot.USELIB_VARS['cs'] = set(['CSFLAGS', 'ASSEMBLIES', 'RESOURCES']) ccroot.lib_patterns['csshlib'] = ['%s'] @feature('cs') @before_method('process_source') def apply_cs(self): """ Create a C# task bound to the attribute *cs_task*. There can be only one C# task by task generator. """ cs_nodes = [] no_nodes = [] for x in self.to_nodes(self.source): if x.name.endswith('.cs'): cs_nodes.append(x) else: no_nodes.append(x) self.source = no_nodes bintype = getattr(self, 'bintype', self.gen.endswith('.dll') and 'library' or 'exe') self.cs_task = tsk = self.create_task('mcs', cs_nodes, self.path.find_or_declare(self.gen)) tsk.env.CSTYPE = '/target:%s' % bintype tsk.env.OUT = '/out:%s' % tsk.outputs[0].abspath() self.env.append_value('CSFLAGS', '/platform:%s' % getattr(self, 'platform', 'anycpu')) inst_to = getattr(self, 'install_path', bintype=='exe' and '${BINDIR}' or '${LIBDIR}') if inst_to: # note: we are making a copy, so the files added to cs_task.outputs won't be installed automatically mod = getattr(self, 'chmod', bintype=='exe' and Utils.O755 or Utils.O644) self.install_task = self.add_install_files(install_to=inst_to, install_from=self.cs_task.outputs[:], chmod=mod) @feature('cs') @after_method('apply_cs') def use_cs(self): """ C# applications honor the **use** keyword:: def build(bld): bld(features='cs', source='My.cs', bintype='library', gen='my.dll', name='mylib') bld(features='cs', source='Hi.cs', includes='.', bintype='exe', gen='hi.exe', use='mylib', name='hi') """ names = self.to_list(getattr(self, 'use', [])) get = self.bld.get_tgen_by_name for x in names: try: y = get(x) except Errors.WafError: self.env.append_value('CSFLAGS', '/reference:%s' % x) continue y.post() tsk = getattr(y, 'cs_task', None) or getattr(y, 'link_task', None) if not tsk: self.bld.fatal('cs task has no link task for use %r' % self) self.cs_task.dep_nodes.extend(tsk.outputs) # dependency self.cs_task.set_run_after(tsk) # order (redundant, the order is inferred from the nodes inputs/outputs) self.env.append_value('CSFLAGS', '/reference:%s' % tsk.outputs[0].abspath()) @feature('cs') @after_method('apply_cs', 'use_cs') def debug_cs(self): """ The C# targets may create .mdb or .pdb files:: def build(bld): bld(features='cs', source='My.cs', bintype='library', gen='my.dll', csdebug='full') # csdebug is a value in (True, 'full', 'pdbonly') """ csdebug = getattr(self, 'csdebug', self.env.CSDEBUG) if not csdebug: return node = self.cs_task.outputs[0] if self.env.CS_NAME == 'mono': out = node.parent.find_or_declare(node.name + '.mdb') else: out = node.change_ext('.pdb') self.cs_task.outputs.append(out) if getattr(self, 'install_task', None): self.pdb_install_task = self.add_install_files( install_to=self.install_task.install_to, install_from=out) if csdebug == 'pdbonly': val = ['/debug+', '/debug:pdbonly'] elif csdebug == 'full': val = ['/debug+', '/debug:full'] else: val = 
['/debug-'] self.env.append_value('CSFLAGS', val) @feature('cs') @after_method('debug_cs') def doc_cs(self): """ The C# targets may create .xml documentation files:: def build(bld): bld(features='cs', source='My.cs', bintype='library', gen='my.dll', csdoc=True) # csdoc is a boolean value """ csdoc = getattr(self, 'csdoc', self.env.CSDOC) if not csdoc: return node = self.cs_task.outputs[0] out = node.change_ext('.xml') self.cs_task.outputs.append(out) if getattr(self, 'install_task', None): self.doc_install_task = self.add_install_files( install_to=self.install_task.install_to, install_from=out) self.env.append_value('CSFLAGS', '/doc:%s' % out.abspath()) class mcs(Task.Task): """ Compile C# files """ color = 'YELLOW' run_str = '${MCS} ${CSTYPE} ${CSFLAGS} ${ASS_ST:ASSEMBLIES} ${RES_ST:RESOURCES} ${OUT} ${SRC}' def split_argfile(self, cmd): inline = [cmd[0]] infile = [] for x in cmd[1:]: # csc doesn't want /noconfig in @file if x.lower() == '/noconfig': inline.append(x) else: infile.append(self.quote_flag(x)) return (inline, infile) def configure(conf): """ Find a C# compiler, set the variable MCS for the compiler and CS_NAME (mono or csc) """ csc = getattr(Options.options, 'cscbinary', None) if csc: conf.env.MCS = csc conf.find_program(['csc', 'mcs', 'gmcs'], var='MCS') conf.env.ASS_ST = '/r:%s' conf.env.RES_ST = '/resource:%s' conf.env.CS_NAME = 'csc' if str(conf.env.MCS).lower().find('mcs') > -1: conf.env.CS_NAME = 'mono' def options(opt): """ Add a command-line option for the configuration:: $ waf configure --with-csc-binary=/foo/bar/mcs """ opt.add_option('--with-csc-binary', type='string', dest='cscbinary') class fake_csshlib(Task.Task): """ Task used for reading a foreign .net assembly and adding the dependency on it """ color = 'YELLOW' inst_to = None def runnable_status(self): return Task.SKIP_ME @conf def read_csshlib(self, name, paths=[]): """ Read a foreign .net assembly for the *use* system:: def build(bld): bld.read_csshlib('ManagedLibrary.dll', paths=[bld.env.mylibrarypath]) bld(features='cs', source='Hi.cs', bintype='exe', gen='hi.exe', use='ManagedLibrary.dll') :param name: Name of the library :type name: string :param paths: Folders in which the library may be found :type paths: list of string :return: A task generator having the feature *fake_lib* which will call :py:func:`waflib.Tools.ccroot.process_lib` :rtype: :py:class:`waflib.TaskGen.task_gen` """ return self(name=name, features='fake_lib', lib_paths=paths, lib_type='csshlib') tdb-1.4.2/third_party/waf/waflib/Tools/cxx.py0000660000000000000000000000312613444661622021063 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) "Base for c++ programs and libraries" from waflib import TaskGen, Task from waflib.Tools import c_preproc from waflib.Tools.ccroot import link_task, stlink_task @TaskGen.extension('.cpp','.cc','.cxx','.C','.c++') def cxx_hook(self, node): "Binds c++ file extensions to create :py:class:`waflib.Tools.cxx.cxx` instances" return self.create_compiled_task('cxx', node) if not '.c' in TaskGen.task_gen.mappings: TaskGen.task_gen.mappings['.c'] = TaskGen.task_gen.mappings['.cpp'] class cxx(Task.Task): "Compiles C++ files into object files" run_str = '${CXX} ${ARCH_ST:ARCH} ${CXXFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${CXX_SRC_F}${SRC} ${CXX_TGT_F}${TGT[0].abspath()} ${CPPFLAGS}' vars = ['CXXDEPS'] # unused variable to depend on, just in case ext_in = ['.h'] # set the build order easily by using ext_out=['.h'] scan = 
c_preproc.scan class cxxprogram(link_task): "Links object files into c++ programs" run_str = '${LINK_CXX} ${LINKFLAGS} ${CXXLNK_SRC_F}${SRC} ${CXXLNK_TGT_F}${TGT[0].abspath()} ${RPATH_ST:RPATH} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${FRAMEWORK_ST:FRAMEWORK} ${ARCH_ST:ARCH} ${STLIB_MARKER} ${STLIBPATH_ST:STLIBPATH} ${STLIB_ST:STLIB} ${SHLIB_MARKER} ${LIBPATH_ST:LIBPATH} ${LIB_ST:LIB} ${LDFLAGS}' vars = ['LINKDEPS'] ext_out = ['.bin'] inst_to = '${BINDIR}' class cxxshlib(cxxprogram): "Links object files into c++ shared libraries" inst_to = '${LIBDIR}' class cxxstlib(stlink_task): "Links object files into c++ static libraries" pass # do not remove tdb-1.4.2/third_party/waf/waflib/Tools/d.py0000660000000000000000000000570613444661622020512 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Carlos Rafael Giani, 2007 (dv) # Thomas Nagy, 2007-2018 (ita) from waflib import Utils, Task, Errors from waflib.TaskGen import taskgen_method, feature, extension from waflib.Tools import d_scan, d_config from waflib.Tools.ccroot import link_task, stlink_task class d(Task.Task): "Compile a d file into an object file" color = 'GREEN' run_str = '${D} ${DFLAGS} ${DINC_ST:INCPATHS} ${D_SRC_F:SRC} ${D_TGT_F:TGT}' scan = d_scan.scan class d_with_header(d): "Compile a d file and generate a header" run_str = '${D} ${DFLAGS} ${DINC_ST:INCPATHS} ${D_HDR_F:tgt.outputs[1].bldpath()} ${D_SRC_F:SRC} ${D_TGT_F:tgt.outputs[0].bldpath()}' class d_header(Task.Task): "Compile d headers" color = 'BLUE' run_str = '${D} ${D_HEADER} ${SRC}' class dprogram(link_task): "Link object files into a d program" run_str = '${D_LINKER} ${LINKFLAGS} ${DLNK_SRC_F}${SRC} ${DLNK_TGT_F:TGT} ${RPATH_ST:RPATH} ${DSTLIB_MARKER} ${DSTLIBPATH_ST:STLIBPATH} ${DSTLIB_ST:STLIB} ${DSHLIB_MARKER} ${DLIBPATH_ST:LIBPATH} ${DSHLIB_ST:LIB}' inst_to = '${BINDIR}' class dshlib(dprogram): "Link object files into a d shared library" inst_to = '${LIBDIR}' class dstlib(stlink_task): "Link object files into a d static library" pass # do not remove @extension('.d', '.di', '.D') def d_hook(self, node): """ Compile *D* files. 
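The object file is named after both the source file name and the task generator
index, so the same source may be compiled by several task generators with
different flags.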
To get .di files as well as .o files, set the following:: def build(bld): bld.program(source='foo.d', target='app', generate_headers=True) """ ext = Utils.destos_to_binfmt(self.env.DEST_OS) == 'pe' and 'obj' or 'o' out = '%s.%d.%s' % (node.name, self.idx, ext) def create_compiled_task(self, name, node): task = self.create_task(name, node, node.parent.find_or_declare(out)) try: self.compiled_tasks.append(task) except AttributeError: self.compiled_tasks = [task] return task if getattr(self, 'generate_headers', None): tsk = create_compiled_task(self, 'd_with_header', node) tsk.outputs.append(node.change_ext(self.env.DHEADER_ext)) else: tsk = create_compiled_task(self, 'd', node) return tsk @taskgen_method def generate_header(self, filename): """ See feature request #104:: def build(bld): tg = bld.program(source='foo.d', target='app') tg.generate_header('blah.d') # is equivalent to: #tg = bld.program(source='foo.d', target='app', header_lst='blah.d') :param filename: header to create :type filename: string """ try: self.header_lst.append([filename, self.install_path]) except AttributeError: self.header_lst = [[filename, self.install_path]] @feature('d') def process_header(self): """ Process the attribute 'header_lst' to create the d header compilation tasks:: def build(bld): bld.program(source='foo.d', target='app', header_lst='blah.d') """ for i in getattr(self, 'header_lst', []): node = self.path.find_resource(i[0]) if not node: raise Errors.WafError('file %r not found on d obj' % i[0]) self.create_task('d_header', node, node.change_ext('.di')) tdb-1.4.2/third_party/waf/waflib/Tools/d_config.py0000660000000000000000000000260713444661622022034 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2016-2018 (ita) from waflib import Utils from waflib.Configure import conf @conf def d_platform_flags(self): """ Sets the extensions dll/so for d programs and libraries """ v = self.env if not v.DEST_OS: v.DEST_OS = Utils.unversioned_sys_platform() binfmt = Utils.destos_to_binfmt(self.env.DEST_OS) if binfmt == 'pe': v.dprogram_PATTERN = '%s.exe' v.dshlib_PATTERN = 'lib%s.dll' v.dstlib_PATTERN = 'lib%s.a' elif binfmt == 'mac-o': v.dprogram_PATTERN = '%s' v.dshlib_PATTERN = 'lib%s.dylib' v.dstlib_PATTERN = 'lib%s.a' else: v.dprogram_PATTERN = '%s' v.dshlib_PATTERN = 'lib%s.so' v.dstlib_PATTERN = 'lib%s.a' DLIB = ''' version(D_Version2) { import std.stdio; int main() { writefln("phobos2"); return 0; } } else { version(Tango) { import tango.stdc.stdio; int main() { printf("tango"); return 0; } } else { import std.stdio; int main() { writefln("phobos1"); return 0; } } } ''' """Detection string for the D standard library""" @conf def check_dlibrary(self, execute=True): """ Detects the kind of standard library that comes with the compiler, and sets conf.env.DLIBRARY to tango, phobos1 or phobos2 """ ret = self.check_cc(features='d dprogram', fragment=DLIB, compile_filename='test.d', execute=execute, define_ret=True) if execute: self.env.DLIBRARY = ret.strip() tdb-1.4.2/third_party/waf/waflib/Tools/d_scan.py0000660000000000000000000001170013527011455021500 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2016-2018 (ita) """ Provide a scanner for finding dependencies on d files """ import re from waflib import Utils def filter_comments(filename): """ :param filename: d file name :type filename: string :rtype: list :return: a list of characters """ txt = Utils.readf(filename) i = 0 buf = [] max = len(txt) begin = 0 while i < max: c = txt[i] if c 
== '"' or c == "'": # skip a string or character literal buf.append(txt[begin:i]) delim = c i += 1 while i < max: c = txt[i] if c == delim: break elif c == '\\': # skip the character following backslash i += 1 i += 1 i += 1 begin = i elif c == '/': # try to replace a comment with whitespace buf.append(txt[begin:i]) i += 1 if i == max: break c = txt[i] if c == '+': # eat nesting /+ +/ comment i += 1 nesting = 1 c = None while i < max: prev = c c = txt[i] if prev == '/' and c == '+': nesting += 1 c = None elif prev == '+' and c == '/': nesting -= 1 if nesting == 0: break c = None i += 1 elif c == '*': # eat /* */ comment i += 1 c = None while i < max: prev = c c = txt[i] if prev == '*' and c == '/': break i += 1 elif c == '/': # eat // comment i += 1 while i < max and txt[i] != '\n': i += 1 else: # no comment begin = i - 1 continue i += 1 begin = i buf.append(' ') else: i += 1 buf.append(txt[begin:]) return buf class d_parser(object): """ Parser for d files """ def __init__(self, env, incpaths): #self.code = '' #self.module = '' #self.imports = [] self.allnames = [] self.re_module = re.compile(r"module\s+([^;]+)") self.re_import = re.compile(r"import\s+([^;]+)") self.re_import_bindings = re.compile("([^:]+):(.*)") self.re_import_alias = re.compile("[^=]+=(.+)") self.env = env self.nodes = [] self.names = [] self.incpaths = incpaths def tryfind(self, filename): """ Search file a file matching an module/import directive :param filename: file to read :type filename: string """ found = 0 for n in self.incpaths: found = n.find_resource(filename.replace('.', '/') + '.d') if found: self.nodes.append(found) self.waiting.append(found) break if not found: if not filename in self.names: self.names.append(filename) def get_strings(self, code): """ :param code: d code to parse :type code: string :return: the modules that the code uses :rtype: a list of match objects """ #self.imports = [] self.module = '' lst = [] # get the module name (if present) mod_name = self.re_module.search(code) if mod_name: self.module = re.sub(r'\s+', '', mod_name.group(1)) # strip all whitespaces # go through the code, have a look at all import occurrences # first, lets look at anything beginning with "import" and ending with ";" import_iterator = self.re_import.finditer(code) if import_iterator: for import_match in import_iterator: import_match_str = re.sub(r'\s+', '', import_match.group(1)) # strip all whitespaces # does this end with an import bindings declaration? # (import bindings always terminate the list of imports) bindings_match = self.re_import_bindings.match(import_match_str) if bindings_match: import_match_str = bindings_match.group(1) # if so, extract the part before the ":" (since the module declaration(s) is/are located there) # split the matching string into a bunch of strings, separated by a comma matches = import_match_str.split(',') for match in matches: alias_match = self.re_import_alias.match(match) if alias_match: # is this an alias declaration? 
(alias = module name) if so, extract the module name match = alias_match.group(1) lst.append(match) return lst def start(self, node): """ The parsing starts here :param node: input file :type node: :py:class:`waflib.Node.Node` """ self.waiting = [node] # while the stack is not empty, add the dependencies while self.waiting: nd = self.waiting.pop(0) self.iter(nd) def iter(self, node): """ Find all the modules that a file depends on, uses :py:meth:`waflib.Tools.d_scan.d_parser.tryfind` to process dependent files :param node: input file :type node: :py:class:`waflib.Node.Node` """ path = node.abspath() # obtain the absolute path code = "".join(filter_comments(path)) # read the file and filter the comments names = self.get_strings(code) # obtain the import strings for x in names: # optimization if x in self.allnames: continue self.allnames.append(x) # for each name, see if it is like a node or not self.tryfind(x) def scan(self): "look for .d/.di used by a d file" env = self.env gruik = d_parser(env, self.generator.includes_nodes) node = self.inputs[0] gruik.start(node) nodes = gruik.nodes names = gruik.names return (nodes, names) tdb-1.4.2/third_party/waf/waflib/Tools/dbus.py0000660000000000000000000000401613444661622021215 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Ali Sabil, 2007 """ Compiles dbus files with **dbus-binding-tool** Typical usage:: def options(opt): opt.load('compiler_c dbus') def configure(conf): conf.load('compiler_c dbus') def build(bld): tg = bld.program( includes = '.', source = bld.path.ant_glob('*.c'), target = 'gnome-hello') tg.add_dbus_file('test.xml', 'test_prefix', 'glib-server') """ from waflib import Task, Errors from waflib.TaskGen import taskgen_method, before_method @taskgen_method def add_dbus_file(self, filename, prefix, mode): """ Adds a dbus file to the list of dbus files to process. Store them in the attribute *dbus_lst*. :param filename: xml file to compile :type filename: string :param prefix: dbus binding tool prefix (--prefix=prefix) :type prefix: string :param mode: dbus binding tool mode (--mode=mode) :type mode: string """ if not hasattr(self, 'dbus_lst'): self.dbus_lst = [] if not 'process_dbus' in self.meths: self.meths.append('process_dbus') self.dbus_lst.append([filename, prefix, mode]) @before_method('process_source') def process_dbus(self): """ Processes the dbus files stored in the attribute *dbus_lst* to create :py:class:`waflib.Tools.dbus.dbus_binding_tool` instances. 
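	Each XML file listed in *dbus_lst* is compiled to a header with the same base
	name, so ``test.xml`` yields ``test.h`` in the build directory.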
""" for filename, prefix, mode in getattr(self, 'dbus_lst', []): node = self.path.find_resource(filename) if not node: raise Errors.WafError('file not found ' + filename) tsk = self.create_task('dbus_binding_tool', node, node.change_ext('.h')) tsk.env.DBUS_BINDING_TOOL_PREFIX = prefix tsk.env.DBUS_BINDING_TOOL_MODE = mode class dbus_binding_tool(Task.Task): """ Compiles a dbus file """ color = 'BLUE' ext_out = ['.h'] run_str = '${DBUS_BINDING_TOOL} --prefix=${DBUS_BINDING_TOOL_PREFIX} --mode=${DBUS_BINDING_TOOL_MODE} --output=${TGT} ${SRC}' shell = True # temporary workaround for #795 def configure(conf): """ Detects the program dbus-binding-tool and sets ``conf.env.DBUS_BINDING_TOOL`` """ conf.find_program('dbus-binding-tool', var='DBUS_BINDING_TOOL') tdb-1.4.2/third_party/waf/waflib/Tools/dmd.py0000660000000000000000000000353013444661622021024 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Carlos Rafael Giani, 2007 (dv) # Thomas Nagy, 2008-2018 (ita) import sys from waflib.Tools import ar, d from waflib.Configure import conf @conf def find_dmd(conf): """ Finds the program *dmd*, *dmd2*, or *ldc* and set the variable *D* """ conf.find_program(['dmd', 'dmd2', 'ldc'], var='D') # make sure that we're dealing with dmd1, dmd2, or ldc(1) out = conf.cmd_and_log(conf.env.D + ['--help']) if out.find("D Compiler v") == -1: out = conf.cmd_and_log(conf.env.D + ['-version']) if out.find("based on DMD v1.") == -1: conf.fatal("detected compiler is not dmd/ldc") @conf def common_flags_ldc(conf): """ Sets the D flags required by *ldc* """ v = conf.env v.DFLAGS = ['-d-version=Posix'] v.LINKFLAGS = [] v.DFLAGS_dshlib = ['-relocation-model=pic'] @conf def common_flags_dmd(conf): """ Set the flags required by *dmd* or *dmd2* """ v = conf.env v.D_SRC_F = ['-c'] v.D_TGT_F = '-of%s' v.D_LINKER = v.D v.DLNK_SRC_F = '' v.DLNK_TGT_F = '-of%s' v.DINC_ST = '-I%s' v.DSHLIB_MARKER = v.DSTLIB_MARKER = '' v.DSTLIB_ST = v.DSHLIB_ST = '-L-l%s' v.DSTLIBPATH_ST = v.DLIBPATH_ST = '-L-L%s' v.LINKFLAGS_dprogram= ['-quiet'] v.DFLAGS_dshlib = ['-fPIC'] v.LINKFLAGS_dshlib = ['-L-shared'] v.DHEADER_ext = '.di' v.DFLAGS_d_with_header = ['-H', '-Hf'] v.D_HDR_F = '%s' def configure(conf): """ Configuration for *dmd*, *dmd2*, and *ldc* """ conf.find_dmd() if sys.platform == 'win32': out = conf.cmd_and_log(conf.env.D + ['--help']) if out.find('D Compiler v2.') > -1: conf.fatal('dmd2 on Windows is not supported, use gdc or ldc2 instead') conf.load('ar') conf.load('d') conf.common_flags_dmd() conf.d_platform_flags() if str(conf.env.D).find('ldc') > -1: conf.common_flags_ldc() tdb-1.4.2/third_party/waf/waflib/Tools/errcheck.py0000660000000000000000000001722213444661622022051 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2011 (ita) """ Common mistakes highlighting. 
There is a performance impact, so this tool is only loaded when running ``waf -v`` """ typos = { 'feature':'features', 'sources':'source', 'targets':'target', 'include':'includes', 'export_include':'export_includes', 'define':'defines', 'importpath':'includes', 'installpath':'install_path', 'iscopy':'is_copy', 'uses':'use', } meths_typos = ['__call__', 'program', 'shlib', 'stlib', 'objects'] import sys from waflib import Logs, Build, Node, Task, TaskGen, ConfigSet, Errors, Utils from waflib.Tools import ccroot def check_same_targets(self): mp = Utils.defaultdict(list) uids = {} def check_task(tsk): if not isinstance(tsk, Task.Task): return if hasattr(tsk, 'no_errcheck_out'): return for node in tsk.outputs: mp[node].append(tsk) try: uids[tsk.uid()].append(tsk) except KeyError: uids[tsk.uid()] = [tsk] for g in self.groups: for tg in g: try: for tsk in tg.tasks: check_task(tsk) except AttributeError: # raised if not a task generator, which should be uncommon check_task(tg) dupe = False for (k, v) in mp.items(): if len(v) > 1: dupe = True msg = '* Node %r is created more than once%s. The task generators are:' % (k, Logs.verbose == 1 and " (full message on 'waf -v -v')" or "") Logs.error(msg) for x in v: if Logs.verbose > 1: Logs.error(' %d. %r', 1 + v.index(x), x.generator) else: Logs.error(' %d. %r in %r', 1 + v.index(x), x.generator.name, getattr(x.generator, 'path', None)) Logs.error('If you think that this is an error, set no_errcheck_out on the task instance') if not dupe: for (k, v) in uids.items(): if len(v) > 1: Logs.error('* Several tasks use the same identifier. Please check the information on\n https://waf.io/apidocs/Task.html?highlight=uid#waflib.Task.Task.uid') tg_details = tsk.generator.name if Logs.verbose > 2: tg_details = tsk.generator for tsk in v: Logs.error(' - object %r (%r) defined in %r', tsk.__class__.__name__, tsk, tg_details) def check_invalid_constraints(self): feat = set() for x in list(TaskGen.feats.values()): feat.union(set(x)) for (x, y) in TaskGen.task_gen.prec.items(): feat.add(x) feat.union(set(y)) ext = set() for x in TaskGen.task_gen.mappings.values(): ext.add(x.__name__) invalid = ext & feat if invalid: Logs.error('The methods %r have invalid annotations: @extension <-> @feature/@before_method/@after_method', list(invalid)) # the build scripts have been read, so we can check for invalid after/before attributes on task classes for cls in list(Task.classes.values()): if sys.hexversion > 0x3000000 and issubclass(cls, Task.Task) and isinstance(cls.hcode, str): raise Errors.WafError('Class %r has hcode value %r of type , expecting (use Utils.h_cmd() ?)' % (cls, cls.hcode)) for x in ('before', 'after'): for y in Utils.to_list(getattr(cls, x, [])): if not Task.classes.get(y): Logs.error('Erroneous order constraint %r=%r on task class %r', x, y, cls.__name__) if getattr(cls, 'rule', None): Logs.error('Erroneous attribute "rule" on task class %r (rename to "run_str")', cls.__name__) def replace(m): """ Replaces existing BuildContext methods to verify parameter names, for example ``bld(source=)`` has no ending *s* """ oldcall = getattr(Build.BuildContext, m) def call(self, *k, **kw): ret = oldcall(self, *k, **kw) for x in typos: if x in kw: if x == 'iscopy' and 'subst' in getattr(self, 'features', ''): continue Logs.error('Fix the typo %r -> %r on %r', x, typos[x], ret) return ret setattr(Build.BuildContext, m, call) def enhance_lib(): """ Modifies existing classes and methods to enable error verification """ for m in meths_typos: replace(m) # catch '..' 
in ant_glob patterns def ant_glob(self, *k, **kw): if k: lst = Utils.to_list(k[0]) for pat in lst: sp = pat.split('/') if '..' in sp: Logs.error("In ant_glob pattern %r: '..' means 'two dots', not 'parent directory'", k[0]) if '.' in sp: Logs.error("In ant_glob pattern %r: '.' means 'one dot', not 'current directory'", k[0]) return self.old_ant_glob(*k, **kw) Node.Node.old_ant_glob = Node.Node.ant_glob Node.Node.ant_glob = ant_glob # catch ant_glob on build folders def ant_iter(self, accept=None, maxdepth=25, pats=[], dir=False, src=True, remove=True, quiet=False): if remove: try: if self.is_child_of(self.ctx.bldnode) and not quiet: quiet = True Logs.error('Calling ant_glob on build folders (%r) is dangerous: add quiet=True / remove=False', self) except AttributeError: pass return self.old_ant_iter(accept, maxdepth, pats, dir, src, remove, quiet) Node.Node.old_ant_iter = Node.Node.ant_iter Node.Node.ant_iter = ant_iter # catch conflicting ext_in/ext_out/before/after declarations old = Task.is_before def is_before(t1, t2): ret = old(t1, t2) if ret and old(t2, t1): Logs.error('Contradictory order constraints in classes %r %r', t1, t2) return ret Task.is_before = is_before # check for bld(feature='cshlib') where no 'c' is given - this can be either a mistake or on purpose # so we only issue a warning def check_err_features(self): lst = self.to_list(self.features) if 'shlib' in lst: Logs.error('feature shlib -> cshlib, dshlib or cxxshlib') for x in ('c', 'cxx', 'd', 'fc'): if not x in lst and lst and lst[0] in [x+y for y in ('program', 'shlib', 'stlib')]: Logs.error('%r features is probably missing %r', self, x) TaskGen.feature('*')(check_err_features) # check for erroneous order constraints def check_err_order(self): if not hasattr(self, 'rule') and not 'subst' in Utils.to_list(self.features): for x in ('before', 'after', 'ext_in', 'ext_out'): if hasattr(self, x): Logs.warn('Erroneous order constraint %r on non-rule based task generator %r', x, self) else: for x in ('before', 'after'): for y in self.to_list(getattr(self, x, [])): if not Task.classes.get(y): Logs.error('Erroneous order constraint %s=%r on %r (no such class)', x, y, self) TaskGen.feature('*')(check_err_order) # check for @extension used with @feature/@before_method/@after_method def check_compile(self): check_invalid_constraints(self) try: ret = self.orig_compile() finally: check_same_targets(self) return ret Build.BuildContext.orig_compile = Build.BuildContext.compile Build.BuildContext.compile = check_compile # check for invalid build groups #914 def use_rec(self, name, **kw): try: y = self.bld.get_tgen_by_name(name) except Errors.WafError: pass else: idx = self.bld.get_group_idx(self) odx = self.bld.get_group_idx(y) if odx > idx: msg = "Invalid 'use' across build groups:" if Logs.verbose > 1: msg += '\n target %r\n uses:\n %r' % (self, y) else: msg += " %r uses %r (try 'waf -v -v' for the full error)" % (self.name, name) raise Errors.WafError(msg) self.orig_use_rec(name, **kw) TaskGen.task_gen.orig_use_rec = TaskGen.task_gen.use_rec TaskGen.task_gen.use_rec = use_rec # check for env.append def _getattr(self, name, default=None): if name == 'append' or name == 'add': raise Errors.WafError('env.append and env.add do not exist: use env.append_value/env.append_unique') elif name == 'prepend': raise Errors.WafError('env.prepend does not exist: use env.prepend_value') if name in self.__slots__: return super(ConfigSet.ConfigSet, self).__getattr__(name, default) else: return self[name] ConfigSet.ConfigSet.__getattr__ = _getattr def 
options(opt): """ Error verification can be enabled by default (not just on ``waf -v``) by adding to the user script options """ enhance_lib() tdb-1.4.2/third_party/waf/waflib/Tools/fc.py0000660000000000000000000001512513527011455020646 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # DC 2008 # Thomas Nagy 2016-2018 (ita) """ Fortran support """ from waflib import Utils, Task, Errors from waflib.Tools import ccroot, fc_config, fc_scan from waflib.TaskGen import extension from waflib.Configure import conf ccroot.USELIB_VARS['fc'] = set(['FCFLAGS', 'DEFINES', 'INCLUDES', 'FCPPFLAGS']) ccroot.USELIB_VARS['fcprogram_test'] = ccroot.USELIB_VARS['fcprogram'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS']) ccroot.USELIB_VARS['fcshlib'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS']) ccroot.USELIB_VARS['fcstlib'] = set(['ARFLAGS', 'LINKDEPS']) @extension('.f','.F','.f90','.F90','.for','.FOR','.f95','.F95','.f03','.F03','.f08','.F08') def fc_hook(self, node): "Binds the Fortran file extensions create :py:class:`waflib.Tools.fc.fc` instances" return self.create_compiled_task('fc', node) @conf def modfile(conf, name): """ Turns a module name into the right module file name. Defaults to all lower case. """ if name.find(':') >= 0: # Depending on a submodule! separator = conf.env.FC_SUBMOD_SEPARATOR or '@' # Ancestors of the submodule will be prefixed to the # submodule name, separated by a colon. modpath = name.split(':') # Only the ancestor (actual) module and the submodule name # will be used for the filename. modname = modpath[0] + separator + modpath[-1] suffix = conf.env.FC_SUBMOD_SUFFIX or '.smod' else: modname = name suffix = '.mod' return {'lower' :modname.lower() + suffix.lower(), 'lower.MOD' :modname.lower() + suffix.upper(), 'UPPER.mod' :modname.upper() + suffix.lower(), 'UPPER' :modname.upper() + suffix.upper()}[conf.env.FC_MOD_CAPITALIZATION or 'lower'] def get_fortran_tasks(tsk): """ Obtains all fortran tasks from the same build group. 
Those tasks must not have the attribute 'nomod' or 'mod_fortran_done' :return: a list of :py:class:`waflib.Tools.fc.fc` instances """ bld = tsk.generator.bld tasks = bld.get_tasks_group(bld.get_group_idx(tsk.generator)) return [x for x in tasks if isinstance(x, fc) and not getattr(x, 'nomod', None) and not getattr(x, 'mod_fortran_done', None)] class fc(Task.Task): """ Fortran tasks can only run when all fortran tasks in a current task group are ready to be executed This may cause a deadlock if some fortran task is waiting for something that cannot happen (circular dependency) Should this ever happen, set the 'nomod=True' on those tasks instances to break the loop """ color = 'GREEN' run_str = '${FC} ${FCFLAGS} ${FCINCPATH_ST:INCPATHS} ${FCDEFINES_ST:DEFINES} ${_FCMODOUTFLAGS} ${FC_TGT_F}${TGT[0].abspath()} ${FC_SRC_F}${SRC[0].abspath()} ${FCPPFLAGS}' vars = ["FORTRANMODPATHFLAG"] def scan(self): """Fortran dependency scanner""" tmp = fc_scan.fortran_parser(self.generator.includes_nodes) tmp.task = self tmp.start(self.inputs[0]) return (tmp.nodes, tmp.names) def runnable_status(self): """ Sets the mod file outputs and the dependencies on the mod files over all Fortran tasks executed by the main thread so there are no concurrency issues """ if getattr(self, 'mod_fortran_done', None): return super(fc, self).runnable_status() # now, if we reach this part it is because this fortran task is the first in the list bld = self.generator.bld # obtain the fortran tasks lst = get_fortran_tasks(self) # disable this method for other tasks for tsk in lst: tsk.mod_fortran_done = True # wait for all the .f tasks to be ready for execution # and ensure that the scanners are called at least once for tsk in lst: ret = tsk.runnable_status() if ret == Task.ASK_LATER: # we have to wait for one of the other fortran tasks to be ready # this may deadlock if there are dependencies between fortran tasks # but this should not happen (we are setting them here!) 
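				# reset the markers so that this whole group is re-examined on
				# the next runnable_status() call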
for x in lst: x.mod_fortran_done = None return Task.ASK_LATER ins = Utils.defaultdict(set) outs = Utils.defaultdict(set) # the .mod files to create for tsk in lst: key = tsk.uid() for x in bld.raw_deps[key]: if x.startswith('MOD@'): name = bld.modfile(x.replace('MOD@', '')) node = bld.srcnode.find_or_declare(name) tsk.set_outputs(node) outs[node].add(tsk) # the .mod files to use for tsk in lst: key = tsk.uid() for x in bld.raw_deps[key]: if x.startswith('USE@'): name = bld.modfile(x.replace('USE@', '')) node = bld.srcnode.find_resource(name) if node and node not in tsk.outputs: if not node in bld.node_deps[key]: bld.node_deps[key].append(node) ins[node].add(tsk) # if the intersection matches, set the order for k in ins.keys(): for a in ins[k]: a.run_after.update(outs[k]) for x in outs[k]: self.generator.bld.producer.revdeps[x].add(a) # the scanner cannot output nodes, so we have to set them # ourselves as task.dep_nodes (additional input nodes) tmp = [] for t in outs[k]: tmp.extend(t.outputs) a.dep_nodes.extend(tmp) a.dep_nodes.sort(key=lambda x: x.abspath()) # the task objects have changed: clear the signature cache for tsk in lst: try: delattr(tsk, 'cache_sig') except AttributeError: pass return super(fc, self).runnable_status() class fcprogram(ccroot.link_task): """Links Fortran programs""" color = 'YELLOW' run_str = '${FC} ${LINKFLAGS} ${FCLNK_SRC_F}${SRC} ${FCLNK_TGT_F}${TGT[0].abspath()} ${RPATH_ST:RPATH} ${FCSTLIB_MARKER} ${FCSTLIBPATH_ST:STLIBPATH} ${FCSTLIB_ST:STLIB} ${FCSHLIB_MARKER} ${FCLIBPATH_ST:LIBPATH} ${FCLIB_ST:LIB} ${LDFLAGS}' inst_to = '${BINDIR}' class fcshlib(fcprogram): """Links Fortran libraries""" inst_to = '${LIBDIR}' class fcstlib(ccroot.stlink_task): """Links Fortran static libraries (uses ar by default)""" pass # do not remove the pass statement class fcprogram_test(fcprogram): """Custom link task to obtain compiler outputs for Fortran configuration tests""" def runnable_status(self): """This task is always executed""" ret = super(fcprogram_test, self).runnable_status() if ret == Task.SKIP_ME: ret = Task.RUN_ME return ret def exec_command(self, cmd, **kw): """Stores the compiler std our/err onto the build context, to bld.out + bld.err""" bld = self.generator.bld kw['shell'] = isinstance(cmd, str) kw['stdout'] = kw['stderr'] = Utils.subprocess.PIPE kw['cwd'] = self.get_cwd() bld.out = bld.err = '' bld.to_log('command: %s\n' % cmd) kw['output'] = 0 try: (bld.out, bld.err) = bld.cmd_and_log(cmd, **kw) except Errors.WafError: return -1 if bld.out: bld.to_log('out: %s\n' % bld.out) if bld.err: bld.to_log('err: %s\n' % bld.err) tdb-1.4.2/third_party/waf/waflib/Tools/fc_config.py0000660000000000000000000003324213527011455022173 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # DC 2008 # Thomas Nagy 2016-2018 (ita) """ Fortran configuration helpers """ import re, os, sys, shlex from waflib.Configure import conf from waflib.TaskGen import feature, before_method FC_FRAGMENT = ' program main\n end program main\n' FC_FRAGMENT2 = ' PROGRAM MAIN\n END\n' # what's the actual difference between these? 
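# (one likely answer: FC_FRAGMENT uses the Fortran 90 'end program' form, while
# FC_FRAGMENT2 is plain upper-case FORTRAN 77 that even ancient compilers accept)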
@conf def fc_flags(conf): """ Defines common fortran configuration flags and file extensions """ v = conf.env v.FC_SRC_F = [] v.FC_TGT_F = ['-c', '-o'] v.FCINCPATH_ST = '-I%s' v.FCDEFINES_ST = '-D%s' if not v.LINK_FC: v.LINK_FC = v.FC v.FCLNK_SRC_F = [] v.FCLNK_TGT_F = ['-o'] v.FCFLAGS_fcshlib = ['-fpic'] v.LINKFLAGS_fcshlib = ['-shared'] v.fcshlib_PATTERN = 'lib%s.so' v.fcstlib_PATTERN = 'lib%s.a' v.FCLIB_ST = '-l%s' v.FCLIBPATH_ST = '-L%s' v.FCSTLIB_ST = '-l%s' v.FCSTLIBPATH_ST = '-L%s' v.FCSTLIB_MARKER = '-Wl,-Bstatic' v.FCSHLIB_MARKER = '-Wl,-Bdynamic' v.SONAME_ST = '-Wl,-h,%s' @conf def fc_add_flags(conf): """ Adds FCFLAGS / LDFLAGS / LINKFLAGS from os.environ to conf.env """ conf.add_os_flags('FCPPFLAGS', dup=False) conf.add_os_flags('FCFLAGS', dup=False) conf.add_os_flags('LINKFLAGS', dup=False) conf.add_os_flags('LDFLAGS', dup=False) @conf def check_fortran(self, *k, **kw): """ Compiles a Fortran program to ensure that the settings are correct """ self.check_cc( fragment = FC_FRAGMENT, compile_filename = 'test.f', features = 'fc fcprogram', msg = 'Compiling a simple fortran app') @conf def check_fc(self, *k, **kw): """ Same as :py:func:`waflib.Tools.c_config.check` but defaults to the *Fortran* programming language (this overrides the C defaults in :py:func:`waflib.Tools.c_config.validate_c`) """ kw['compiler'] = 'fc' if not 'compile_mode' in kw: kw['compile_mode'] = 'fc' if not 'type' in kw: kw['type'] = 'fcprogram' if not 'compile_filename' in kw: kw['compile_filename'] = 'test.f90' if not 'code' in kw: kw['code'] = FC_FRAGMENT return self.check(*k, **kw) # ------------------------------------------------------------------------ # --- These are the default platform modifiers, refactored here for # convenience. gfortran and g95 have much overlap. # ------------------------------------------------------------------------ @conf def fortran_modifier_darwin(conf): """ Defines Fortran flags and extensions for OSX systems """ v = conf.env v.FCFLAGS_fcshlib = ['-fPIC'] v.LINKFLAGS_fcshlib = ['-dynamiclib'] v.fcshlib_PATTERN = 'lib%s.dylib' v.FRAMEWORKPATH_ST = '-F%s' v.FRAMEWORK_ST = ['-framework'] v.LINKFLAGS_fcstlib = [] v.FCSHLIB_MARKER = '' v.FCSTLIB_MARKER = '' v.SONAME_ST = '' @conf def fortran_modifier_win32(conf): """ Defines Fortran flags for Windows platforms """ v = conf.env v.fcprogram_PATTERN = v.fcprogram_test_PATTERN = '%s.exe' v.fcshlib_PATTERN = '%s.dll' v.implib_PATTERN = '%s.dll.a' v.IMPLIB_ST = '-Wl,--out-implib,%s' v.FCFLAGS_fcshlib = [] # Auto-import is enabled by default even without this option, # but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages # that the linker emits otherwise. 
v.append_value('LINKFLAGS', ['-Wl,--enable-auto-import']) @conf def fortran_modifier_cygwin(conf): """ Defines Fortran flags for use on cygwin """ fortran_modifier_win32(conf) v = conf.env v.fcshlib_PATTERN = 'cyg%s.dll' v.append_value('LINKFLAGS_fcshlib', ['-Wl,--enable-auto-image-base']) v.FCFLAGS_fcshlib = [] # ------------------------------------------------------------------------ @conf def check_fortran_dummy_main(self, *k, **kw): """ Determines if a main function is needed by compiling a code snippet with the C compiler and linking it with the Fortran compiler (useful on unix-like systems) """ if not self.env.CC: self.fatal('A c compiler is required for check_fortran_dummy_main') lst = ['MAIN__', '__MAIN', '_MAIN', 'MAIN_', 'MAIN'] lst.extend([m.lower() for m in lst]) lst.append('') self.start_msg('Detecting whether we need a dummy main') for main in lst: kw['fortran_main'] = main try: self.check_cc( fragment = 'int %s() { return 0; }\n' % (main or 'test'), features = 'c fcprogram', mandatory = True ) if not main: self.env.FC_MAIN = -1 self.end_msg('no') else: self.env.FC_MAIN = main self.end_msg('yes %s' % main) break except self.errors.ConfigurationError: pass else: self.end_msg('not found') self.fatal('could not detect whether fortran requires a dummy main, see the config.log') # ------------------------------------------------------------------------ GCC_DRIVER_LINE = re.compile('^Driving:') POSIX_STATIC_EXT = re.compile(r'\S+\.a') POSIX_LIB_FLAGS = re.compile(r'-l\S+') @conf def is_link_verbose(self, txt): """Returns True if 'useful' link options can be found in txt""" assert isinstance(txt, str) for line in txt.splitlines(): if not GCC_DRIVER_LINE.search(line): if POSIX_STATIC_EXT.search(line) or POSIX_LIB_FLAGS.search(line): return True return False @conf def check_fortran_verbose_flag(self, *k, **kw): """ Checks what kind of verbose (-v) flag works, then sets it to env.FC_VERBOSE_FLAG """ self.start_msg('fortran link verbose flag') for x in ('-v', '--verbose', '-verbose', '-V'): try: self.check_cc( features = 'fc fcprogram_test', fragment = FC_FRAGMENT2, compile_filename = 'test.f', linkflags = [x], mandatory=True) except self.errors.ConfigurationError: pass else: # output is on stderr or stdout (for xlf) if self.is_link_verbose(self.test_bld.err) or self.is_link_verbose(self.test_bld.out): self.end_msg(x) break else: self.end_msg('failure') self.fatal('Could not obtain the fortran link verbose flag (see config.log)') self.env.FC_VERBOSE_FLAG = x return x # ------------------------------------------------------------------------ # linkflags which match those are ignored LINKFLAGS_IGNORED = [r'-lang*', r'-lcrt[a-zA-Z0-9\.]*\.o', r'-lc$', r'-lSystem', r'-libmil', r'-LIST:*', r'-LNO:*'] if os.name == 'nt': LINKFLAGS_IGNORED.extend([r'-lfrt*', r'-luser32', r'-lkernel32', r'-ladvapi32', r'-lmsvcrt', r'-lshell32', r'-lmingw', r'-lmoldname']) else: LINKFLAGS_IGNORED.append(r'-lgcc*') RLINKFLAGS_IGNORED = [re.compile(f) for f in LINKFLAGS_IGNORED] def _match_ignore(line): """Returns True if the line should be ignored (Fortran verbose flag test)""" for i in RLINKFLAGS_IGNORED: if i.match(line): return True return False def parse_fortran_link(lines): """Given the output of verbose link of Fortran compiler, this returns a list of flags necessary for linking using the standard linker.""" final_flags = [] for line in lines: if not GCC_DRIVER_LINE.match(line): _parse_flink_line(line, final_flags) return final_flags SPACE_OPTS = re.compile('^-[LRuYz]$') NOSPACE_OPTS = re.compile('^-[RL]') 
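# Illustrative example (hypothetical verbose output): from linker lines such as
#   Driving: gfortran -v test.o -o test
#   /usr/bin/ld -L/usr/lib/gcc/x86_64 -lgfortran -lm -lgcc_s -lc
# parse_fortran_link() drops the 'Driving:' line, keeps '-L/usr/lib/gcc/x86_64',
# '-lgfortran' and '-lm', and filters '-lgcc_s' and '-lc' out via LINKFLAGS_IGNORED.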
def _parse_flink_token(lexer, token, tmp_flags): # Here we go (convention for wildcard is shell, not regex !) # 1 TODO: we first get some root .a libraries # 2 TODO: take everything starting by -bI:* # 3 Ignore the following flags: -lang* | -lcrt*.o | -lc | # -lgcc* | -lSystem | -libmil | -LANG:=* | -LIST:* | -LNO:*) # 4 take into account -lkernel32 # 5 For options of the kind -[[LRuYz]], as they take one argument # after, the actual option is the next token # 6 For -YP,*: take and replace by -Larg where arg is the old # argument # 7 For -[lLR]*: take # step 3 if _match_ignore(token): pass # step 4 elif token.startswith('-lkernel32') and sys.platform == 'cygwin': tmp_flags.append(token) # step 5 elif SPACE_OPTS.match(token): t = lexer.get_token() if t.startswith('P,'): t = t[2:] for opt in t.split(os.pathsep): tmp_flags.append('-L%s' % opt) # step 6 elif NOSPACE_OPTS.match(token): tmp_flags.append(token) # step 7 elif POSIX_LIB_FLAGS.match(token): tmp_flags.append(token) else: # ignore anything not explicitly taken into account pass t = lexer.get_token() return t def _parse_flink_line(line, final_flags): """private""" lexer = shlex.shlex(line, posix = True) lexer.whitespace_split = True t = lexer.get_token() tmp_flags = [] while t: t = _parse_flink_token(lexer, t, tmp_flags) final_flags.extend(tmp_flags) return final_flags @conf def check_fortran_clib(self, autoadd=True, *k, **kw): """ Obtains the flags for linking with the C library if this check works, add uselib='CLIB' to your task generators """ if not self.env.FC_VERBOSE_FLAG: self.fatal('env.FC_VERBOSE_FLAG is not set: execute check_fortran_verbose_flag?') self.start_msg('Getting fortran runtime link flags') try: self.check_cc( fragment = FC_FRAGMENT2, compile_filename = 'test.f', features = 'fc fcprogram_test', linkflags = [self.env.FC_VERBOSE_FLAG] ) except Exception: self.end_msg(False) if kw.get('mandatory', True): conf.fatal('Could not find the c library flags') else: out = self.test_bld.err flags = parse_fortran_link(out.splitlines()) self.end_msg('ok (%s)' % ' '.join(flags)) self.env.LINKFLAGS_CLIB = flags return flags return [] def getoutput(conf, cmd, stdin=False): """ Obtains Fortran command outputs """ from waflib import Errors if conf.env.env: env = conf.env.env else: env = dict(os.environ) env['LANG'] = 'C' input = stdin and '\n'.encode() or None try: out, err = conf.cmd_and_log(cmd, env=env, output=0, input=input) except Errors.WafError as e: # An WafError might indicate an error code during the command # execution, in this case we still obtain the stderr and stdout, # which we can use to find the version string. if not (hasattr(e, 'stderr') and hasattr(e, 'stdout')): raise e else: # Ignore the return code and return the original # stdout and stderr. 
out = e.stdout err = e.stderr except Exception: conf.fatal('could not determine the compiler version %r' % cmd) return (out, err) # ------------------------------------------------------------------------ ROUTINES_CODE = """\ subroutine foobar() return end subroutine foo_bar() return end """ MAIN_CODE = """ void %(dummy_func_nounder)s(void); void %(dummy_func_under)s(void); int %(main_func_name)s() { %(dummy_func_nounder)s(); %(dummy_func_under)s(); return 0; } """ @feature('link_main_routines_func') @before_method('process_source') def link_main_routines_tg_method(self): """ The configuration test declares a unique task generator, so we create other task generators from there for fortran link tests """ def write_test_file(task): task.outputs[0].write(task.generator.code) bld = self.bld bld(rule=write_test_file, target='main.c', code=MAIN_CODE % self.__dict__) bld(rule=write_test_file, target='test.f', code=ROUTINES_CODE) bld(features='fc fcstlib', source='test.f', target='test') bld(features='c fcprogram', source='main.c', target='app', use='test') def mangling_schemes(): """ Generate triplets for use with mangle_name (used in check_fortran_mangling) the order is tuned for gfortan """ for u in ('_', ''): for du in ('', '_'): for c in ("lower", "upper"): yield (u, du, c) def mangle_name(u, du, c, name): """Mangle a name from a triplet (used in check_fortran_mangling)""" return getattr(name, c)() + u + (name.find('_') != -1 and du or '') @conf def check_fortran_mangling(self, *k, **kw): """ Detect the mangling scheme, sets FORTRAN_MANGLING to the triplet found This test will compile a fortran static library, then link a c app against it """ if not self.env.CC: self.fatal('A c compiler is required for link_main_routines') if not self.env.FC: self.fatal('A fortran compiler is required for link_main_routines') if not self.env.FC_MAIN: self.fatal('Checking for mangling requires self.env.FC_MAIN (execute "check_fortran_dummy_main" first?)') self.start_msg('Getting fortran mangling scheme') for (u, du, c) in mangling_schemes(): try: self.check_cc( compile_filename = [], features = 'link_main_routines_func', msg = 'nomsg', errmsg = 'nomsg', dummy_func_nounder = mangle_name(u, du, c, 'foobar'), dummy_func_under = mangle_name(u, du, c, 'foo_bar'), main_func_name = self.env.FC_MAIN ) except self.errors.ConfigurationError: pass else: self.end_msg("ok ('%s', '%s', '%s-case')" % (u, du, c)) self.env.FORTRAN_MANGLING = (u, du, c) break else: self.end_msg(False) self.fatal('mangler not found') return (u, du, c) @feature('pyext') @before_method('propagate_uselib_vars', 'apply_link') def set_lib_pat(self): """Sets the Fortran flags for linking with Python""" self.env.fcshlib_PATTERN = self.env.pyext_PATTERN @conf def detect_openmp(self): """ Detects openmp flags and sets the OPENMP ``FCFLAGS``/``LINKFLAGS`` """ for x in ('-fopenmp','-openmp','-mp','-xopenmp','-omp','-qsmp=omp'): try: self.check_fc( msg = 'Checking for OpenMP flag %s' % x, fragment = 'program main\n call omp_get_num_threads()\nend program main', fcflags = x, linkflags = x, uselib_store = 'OPENMP' ) except self.errors.ConfigurationError: pass else: break else: self.fatal('Could not find OpenMP') @conf def check_gfortran_o_space(self): if self.env.FC_NAME != 'GFORTRAN' or int(self.env.FC_VERSION[0]) > 4: # This is for old compilers and only for gfortran. # No idea how other implementations handle this. Be safe and bail out. 
return self.env.stash() self.env.FCLNK_TGT_F = ['-o', ''] try: self.check_fc(msg='Checking if the -o link must be split from arguments', fragment=FC_FRAGMENT, features='fc fcshlib') except self.errors.ConfigurationError: self.env.revert() else: self.env.commit() tdb-1.4.2/third_party/waf/waflib/Tools/fc_scan.py0000660000000000000000000000602113527011455021645 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # DC 2008 # Thomas Nagy 2016-2018 (ita) import re INC_REGEX = r"""(?:^|['">]\s*;)\s*(?:|#\s*)INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])""" USE_REGEX = r"""(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)""" MOD_REGEX = r"""(?:^|;)\s*MODULE(?!\s+(?:PROCEDURE|SUBROUTINE|FUNCTION))\s+(\w+)""" SMD_REGEX = r"""(?:^|;)\s*SUBMODULE\s*\(([\w:]+)\)\s*(\w+)""" re_inc = re.compile(INC_REGEX, re.I) re_use = re.compile(USE_REGEX, re.I) re_mod = re.compile(MOD_REGEX, re.I) re_smd = re.compile(SMD_REGEX, re.I) class fortran_parser(object): """ This parser returns: * the nodes corresponding to the module names to produce * the nodes corresponding to the include files used * the module names used by the fortran files """ def __init__(self, incpaths): self.seen = [] """Files already parsed""" self.nodes = [] """List of :py:class:`waflib.Node.Node` representing the dependencies to return""" self.names = [] """List of module names to return""" self.incpaths = incpaths """List of :py:class:`waflib.Node.Node` representing the include paths""" def find_deps(self, node): """ Parses a Fortran file to obtain the dependencies used/provided :param node: fortran file to read :type node: :py:class:`waflib.Node.Node` :return: lists representing the includes, the modules used, and the modules created by a fortran file :rtype: tuple of list of strings """ txt = node.read() incs = [] uses = [] mods = [] for line in txt.splitlines(): # line by line regexp search? optimize? m = re_inc.search(line) if m: incs.append(m.group(1)) m = re_use.search(line) if m: uses.append(m.group(1)) m = re_mod.search(line) if m: mods.append(m.group(1)) m = re_smd.search(line) if m: uses.append(m.group(1)) mods.append('{0}:{1}'.format(m.group(1),m.group(2))) return (incs, uses, mods) def start(self, node): """ Start parsing. Use the stack ``self.waiting`` to hold nodes to iterate on :param node: fortran file :type node: :py:class:`waflib.Node.Node` """ self.waiting = [node] while self.waiting: nd = self.waiting.pop(0) self.iter(nd) def iter(self, node): """ Processes a single file during dependency parsing. Extracts files used modules used and modules provided. """ incs, uses, mods = self.find_deps(node) for x in incs: if x in self.seen: continue self.seen.append(x) self.tryfind_header(x) for x in uses: name = "USE@%s" % x if not name in self.names: self.names.append(name) for x in mods: name = "MOD@%s" % x if not name in self.names: self.names.append(name) def tryfind_header(self, filename): """ Adds an include file to the list of nodes to process :param filename: file name :type filename: string """ found = None for n in self.incpaths: found = n.find_resource(filename) if found: self.nodes.append(found) self.waiting.append(found) break if not found: if not filename in self.names: self.names.append(filename) tdb-1.4.2/third_party/waf/waflib/Tools/flex.py0000660000000000000000000000302113444661622021211 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # John O'Meara, 2006 # Thomas Nagy, 2006-2018 (ita) """ The **flex** program is a code generator which creates C or C++ files. 
The generated files are compiled into object files. """ import os, re from waflib import Task, TaskGen from waflib.Tools import ccroot def decide_ext(self, node): if 'cxx' in self.features: return ['.lex.cc'] return ['.lex.c'] def flexfun(tsk): env = tsk.env bld = tsk.generator.bld wd = bld.variant_dir def to_list(xx): if isinstance(xx, str): return [xx] return xx tsk.last_cmd = lst = [] lst.extend(to_list(env.FLEX)) lst.extend(to_list(env.FLEXFLAGS)) inputs = [a.path_from(tsk.get_cwd()) for a in tsk.inputs] if env.FLEX_MSYS: inputs = [x.replace(os.sep, '/') for x in inputs] lst.extend(inputs) lst = [x for x in lst if x] txt = bld.cmd_and_log(lst, cwd=wd, env=env.env or None, quiet=0) tsk.outputs[0].write(txt.replace('\r\n', '\n').replace('\r', '\n')) # issue #1207 TaskGen.declare_chain( name = 'flex', rule = flexfun, # issue #854 ext_in = '.l', decider = decide_ext, ) # To support the following: # bld(features='c', flexflags='-P/foo') Task.classes['flex'].vars = ['FLEXFLAGS', 'FLEX'] ccroot.USELIB_VARS['c'].add('FLEXFLAGS') ccroot.USELIB_VARS['cxx'].add('FLEXFLAGS') def configure(conf): """ Detect the *flex* program """ conf.find_program('flex', var='FLEX') conf.env.FLEXFLAGS = ['-t'] if re.search (r"\\msys\\[0-9.]+\\bin\\flex.exe$", conf.env.FLEX[0]): # this is the flex shipped with MSYS conf.env.FLEX_MSYS = True tdb-1.4.2/third_party/waf/waflib/Tools/g95.py0000660000000000000000000000276213444661622020672 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # KWS 2010 # Thomas Nagy 2016-2018 (ita) import re from waflib import Utils from waflib.Tools import fc, fc_config, fc_scan, ar from waflib.Configure import conf @conf def find_g95(conf): fc = conf.find_program('g95', var='FC') conf.get_g95_version(fc) conf.env.FC_NAME = 'G95' @conf def g95_flags(conf): v = conf.env v.FCFLAGS_fcshlib = ['-fPIC'] v.FORTRANMODFLAG = ['-fmod=', ''] # template for module path v.FCFLAGS_DEBUG = ['-Werror'] # why not @conf def g95_modifier_win32(conf): fc_config.fortran_modifier_win32(conf) @conf def g95_modifier_cygwin(conf): fc_config.fortran_modifier_cygwin(conf) @conf def g95_modifier_darwin(conf): fc_config.fortran_modifier_darwin(conf) @conf def g95_modifier_platform(conf): dest_os = conf.env.DEST_OS or Utils.unversioned_sys_platform() g95_modifier_func = getattr(conf, 'g95_modifier_' + dest_os, None) if g95_modifier_func: g95_modifier_func() @conf def get_g95_version(conf, fc): """get the compiler version""" version_re = re.compile(r"g95\s*(?P\d*)\.(?P\d*)").search cmd = fc + ['--version'] out, err = fc_config.getoutput(conf, cmd, stdin=False) if out: match = version_re(out) else: match = version_re(err) if not match: conf.fatal('cannot determine g95 version') k = match.groupdict() conf.env.FC_VERSION = (k['major'], k['minor']) def configure(conf): conf.find_g95() conf.find_ar() conf.fc_flags() conf.fc_add_flags() conf.g95_flags() conf.g95_modifier_platform() tdb-1.4.2/third_party/waf/waflib/Tools/gas.py0000660000000000000000000000064513444661622021036 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2008-2018 (ita) "Detect as/gas/gcc for compiling assembly files" import waflib.Tools.asm # - leave this from waflib.Tools import ar def configure(conf): """ Find the programs gas/as/gcc and set the variable *AS* """ conf.find_program(['gas', 'gcc'], var='AS') conf.env.AS_TGT_F = ['-c', '-o'] conf.env.ASLNK_TGT_F = ['-o'] conf.find_ar() conf.load('asm') tdb-1.4.2/third_party/waf/waflib/Tools/gcc.py0000660000000000000000000000770113444661622021020 0ustar 
rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) # Ralf Habacker, 2006 (rh) # Yinon Ehrlich, 2009 """ gcc/llvm detection. """ from waflib.Tools import ccroot, ar from waflib.Configure import conf @conf def find_gcc(conf): """ Find the program gcc, and if present, try to detect its version number """ cc = conf.find_program(['gcc', 'cc'], var='CC') conf.get_cc_version(cc, gcc=True) conf.env.CC_NAME = 'gcc' @conf def gcc_common_flags(conf): """ Common flags for gcc on nearly all platforms """ v = conf.env v.CC_SRC_F = [] v.CC_TGT_F = ['-c', '-o'] if not v.LINK_CC: v.LINK_CC = v.CC v.CCLNK_SRC_F = [] v.CCLNK_TGT_F = ['-o'] v.CPPPATH_ST = '-I%s' v.DEFINES_ST = '-D%s' v.LIB_ST = '-l%s' # template for adding libs v.LIBPATH_ST = '-L%s' # template for adding libpaths v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-L%s' v.RPATH_ST = '-Wl,-rpath,%s' v.SONAME_ST = '-Wl,-h,%s' v.SHLIB_MARKER = '-Wl,-Bdynamic' v.STLIB_MARKER = '-Wl,-Bstatic' v.cprogram_PATTERN = '%s' v.CFLAGS_cshlib = ['-fPIC'] v.LINKFLAGS_cshlib = ['-shared'] v.cshlib_PATTERN = 'lib%s.so' v.LINKFLAGS_cstlib = ['-Wl,-Bstatic'] v.cstlib_PATTERN = 'lib%s.a' v.LINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup'] v.CFLAGS_MACBUNDLE = ['-fPIC'] v.macbundle_PATTERN = '%s.bundle' @conf def gcc_modifier_win32(conf): """Configuration flags for executing gcc on Windows""" v = conf.env v.cprogram_PATTERN = '%s.exe' v.cshlib_PATTERN = '%s.dll' v.implib_PATTERN = '%s.dll.a' v.IMPLIB_ST = '-Wl,--out-implib,%s' v.CFLAGS_cshlib = [] # Auto-import is enabled by default even without this option, # but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages # that the linker emits otherwise. v.append_value('LINKFLAGS', ['-Wl,--enable-auto-import']) @conf def gcc_modifier_cygwin(conf): """Configuration flags for executing gcc on Cygwin""" gcc_modifier_win32(conf) v = conf.env v.cshlib_PATTERN = 'cyg%s.dll' v.append_value('LINKFLAGS_cshlib', ['-Wl,--enable-auto-image-base']) v.CFLAGS_cshlib = [] @conf def gcc_modifier_darwin(conf): """Configuration flags for executing gcc on MacOS""" v = conf.env v.CFLAGS_cshlib = ['-fPIC'] v.LINKFLAGS_cshlib = ['-dynamiclib'] v.cshlib_PATTERN = 'lib%s.dylib' v.FRAMEWORKPATH_ST = '-F%s' v.FRAMEWORK_ST = ['-framework'] v.ARCH_ST = ['-arch'] v.LINKFLAGS_cstlib = [] v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.SONAME_ST = [] @conf def gcc_modifier_aix(conf): """Configuration flags for executing gcc on AIX""" v = conf.env v.LINKFLAGS_cprogram = ['-Wl,-brtl'] v.LINKFLAGS_cshlib = ['-shared','-Wl,-brtl,-bexpfull'] v.SHLIB_MARKER = [] @conf def gcc_modifier_hpux(conf): v = conf.env v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.CFLAGS_cshlib = ['-fPIC','-DPIC'] v.cshlib_PATTERN = 'lib%s.sl' @conf def gcc_modifier_openbsd(conf): conf.env.SONAME_ST = [] @conf def gcc_modifier_osf1V(conf): v = conf.env v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.SONAME_ST = [] @conf def gcc_modifier_platform(conf): """Execute platform-specific functions based on *gcc_modifier_+NAME*""" # * set configurations specific for a platform. # * the destination platform is detected automatically by looking at the macros the compiler predefines, # and if it's not recognised, it fallbacks to sys.platform. 
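	# A standalone sketch of this dispatch pattern (illustrative only, not part
	# of the original tool); hooks follow the 'gcc_modifier_<DEST_OS>' scheme:
	#
	#   def dispatch_platform_hook(conf, prefix='gcc_modifier_'):
	#       hook = getattr(conf, prefix + (conf.env.DEST_OS or 'unknown'), None)
	#       if hook:
	#           hook()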
gcc_modifier_func = getattr(conf, 'gcc_modifier_' + conf.env.DEST_OS, None) if gcc_modifier_func: gcc_modifier_func() def configure(conf): """ Configuration for gcc """ conf.find_gcc() conf.find_ar() conf.gcc_common_flags() conf.gcc_modifier_platform() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() conf.check_gcc_o_space() tdb-1.4.2/third_party/waf/waflib/Tools/gdc.py0000660000000000000000000000212313444661622021012 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Carlos Rafael Giani, 2007 (dv) from waflib.Tools import ar, d from waflib.Configure import conf @conf def find_gdc(conf): """ Finds the program gdc and set the variable *D* """ conf.find_program('gdc', var='D') out = conf.cmd_and_log(conf.env.D + ['--version']) if out.find("gdc") == -1: conf.fatal("detected compiler is not gdc") @conf def common_flags_gdc(conf): """ Sets the flags required by *gdc* """ v = conf.env v.DFLAGS = [] v.D_SRC_F = ['-c'] v.D_TGT_F = '-o%s' v.D_LINKER = v.D v.DLNK_SRC_F = '' v.DLNK_TGT_F = '-o%s' v.DINC_ST = '-I%s' v.DSHLIB_MARKER = v.DSTLIB_MARKER = '' v.DSTLIB_ST = v.DSHLIB_ST = '-l%s' v.DSTLIBPATH_ST = v.DLIBPATH_ST = '-L%s' v.LINKFLAGS_dshlib = ['-shared'] v.DHEADER_ext = '.di' v.DFLAGS_d_with_header = '-fintfc' v.D_HDR_F = '-fintfc-file=%s' def configure(conf): """ Configuration for gdc """ conf.find_gdc() conf.load('ar') conf.load('d') conf.common_flags_gdc() conf.d_platform_flags() tdb-1.4.2/third_party/waf/waflib/Tools/gfortran.py0000660000000000000000000000442213444661622022103 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # DC 2008 # Thomas Nagy 2016-2018 (ita) import re from waflib import Utils from waflib.Tools import fc, fc_config, fc_scan, ar from waflib.Configure import conf @conf def find_gfortran(conf): """Find the gfortran program (will look in the environment variable 'FC')""" fc = conf.find_program(['gfortran','g77'], var='FC') # (fallback to g77 for systems, where no gfortran is available) conf.get_gfortran_version(fc) conf.env.FC_NAME = 'GFORTRAN' @conf def gfortran_flags(conf): v = conf.env v.FCFLAGS_fcshlib = ['-fPIC'] v.FORTRANMODFLAG = ['-J', ''] # template for module path v.FCFLAGS_DEBUG = ['-Werror'] # why not @conf def gfortran_modifier_win32(conf): fc_config.fortran_modifier_win32(conf) @conf def gfortran_modifier_cygwin(conf): fc_config.fortran_modifier_cygwin(conf) @conf def gfortran_modifier_darwin(conf): fc_config.fortran_modifier_darwin(conf) @conf def gfortran_modifier_platform(conf): dest_os = conf.env.DEST_OS or Utils.unversioned_sys_platform() gfortran_modifier_func = getattr(conf, 'gfortran_modifier_' + dest_os, None) if gfortran_modifier_func: gfortran_modifier_func() @conf def get_gfortran_version(conf, fc): """Get the compiler version""" # ensure this is actually gfortran, not an imposter. 
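	# Hedged standalone sketch of the probe below, outside of waf:
	#
	#   import re, subprocess
	#   p = subprocess.run(['gfortran', '--version'], capture_output=True, text=True)
	#   if not re.search(r"GNU\s*Fortran", p.stdout or p.stderr, re.I):
	#       raise RuntimeError('gfortran not detected')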
version_re = re.compile(r"GNU\s*Fortran", re.I).search cmd = fc + ['--version'] out, err = fc_config.getoutput(conf, cmd, stdin=False) if out: match = version_re(out) else: match = version_re(err) if not match: conf.fatal('Could not determine the compiler type') # --- now get more detailed info -- see c_config.get_cc_version cmd = fc + ['-dM', '-E', '-'] out, err = fc_config.getoutput(conf, cmd, stdin=True) if out.find('__GNUC__') < 0: conf.fatal('Could not determine the compiler type') k = {} out = out.splitlines() import shlex for line in out: lst = shlex.split(line) if len(lst)>2: key = lst[1] val = lst[2] k[key] = val def isD(var): return var in k def isT(var): return var in k and k[var] != '0' conf.env.FC_VERSION = (k['__GNUC__'], k['__GNUC_MINOR__'], k['__GNUC_PATCHLEVEL__']) def configure(conf): conf.find_gfortran() conf.find_ar() conf.fc_flags() conf.fc_add_flags() conf.gfortran_flags() conf.gfortran_modifier_platform() conf.check_gfortran_o_space() tdb-1.4.2/third_party/waf/waflib/Tools/glib2.py0000660000000000000000000003663313444661622021271 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) """ Support for GLib2 tools: * marshal * enums * gsettings * gresource """ import os import functools from waflib import Context, Task, Utils, Options, Errors, Logs from waflib.TaskGen import taskgen_method, before_method, feature, extension from waflib.Configure import conf ################## marshal files @taskgen_method def add_marshal_file(self, filename, prefix): """ Adds a file to the list of marshal files to process. Store them in the attribute *marshal_list*. :param filename: xml file to compile :type filename: string :param prefix: marshal prefix (--prefix=prefix) :type prefix: string """ if not hasattr(self, 'marshal_list'): self.marshal_list = [] self.meths.append('process_marshal') self.marshal_list.append((filename, prefix)) @before_method('process_source') def process_marshal(self): """ Processes the marshal files stored in the attribute *marshal_list* to create :py:class:`waflib.Tools.glib2.glib_genmarshal` instances. Adds the c file created to the list of source to process. """ for f, prefix in getattr(self, 'marshal_list', []): node = self.path.find_resource(f) if not node: raise Errors.WafError('file not found %r' % f) h_node = node.change_ext('.h') c_node = node.change_ext('.c') task = self.create_task('glib_genmarshal', node, [h_node, c_node]) task.env.GLIB_GENMARSHAL_PREFIX = prefix self.source = self.to_nodes(getattr(self, 'source', [])) self.source.append(c_node) class glib_genmarshal(Task.Task): vars = ['GLIB_GENMARSHAL_PREFIX', 'GLIB_GENMARSHAL'] color = 'BLUE' ext_out = ['.h'] def run(self): bld = self.generator.bld get = self.env.get_flat cmd1 = "%s %s --prefix=%s --header > %s" % ( get('GLIB_GENMARSHAL'), self.inputs[0].srcpath(), get('GLIB_GENMARSHAL_PREFIX'), self.outputs[0].abspath() ) ret = bld.exec_command(cmd1) if ret: return ret #print self.outputs[1].abspath() c = '''#include "%s"\n''' % self.outputs[0].name self.outputs[1].write(c) cmd2 = "%s %s --prefix=%s --body >> %s" % ( get('GLIB_GENMARSHAL'), self.inputs[0].srcpath(), get('GLIB_GENMARSHAL_PREFIX'), self.outputs[1].abspath() ) return bld.exec_command(cmd2) ########################## glib-mkenums @taskgen_method def add_enums_from_template(self, source='', target='', template='', comments=''): """ Adds a file to the list of enum files to process. Stores them in the attribute *enums_list*. 
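	Usage sketch (hedged; the enclosing task generator and file names are
	hypothetical)::

		def build(bld):
			tg = bld(features='c cshlib', source='foo.c', target='foo')
			tg.add_enums_from_template(source='foo.h',
				target='foo-enum.c', template='foo-enum.c.tmpl')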
:param source: enum file to process :type source: string :param target: target file :type target: string :param template: template file :type template: string :param comments: comments :type comments: string """ if not hasattr(self, 'enums_list'): self.enums_list = [] self.meths.append('process_enums') self.enums_list.append({'source': source, 'target': target, 'template': template, 'file-head': '', 'file-prod': '', 'file-tail': '', 'enum-prod': '', 'value-head': '', 'value-prod': '', 'value-tail': '', 'comments': comments}) @taskgen_method def add_enums(self, source='', target='', file_head='', file_prod='', file_tail='', enum_prod='', value_head='', value_prod='', value_tail='', comments=''): """ Adds a file to the list of enum files to process. Stores them in the attribute *enums_list*. :param source: enum file to process :type source: string :param target: target file :type target: string :param file_head: unused :param file_prod: unused :param file_tail: unused :param enum_prod: unused :param value_head: unused :param value_prod: unused :param value_tail: unused :param comments: comments :type comments: string """ if not hasattr(self, 'enums_list'): self.enums_list = [] self.meths.append('process_enums') self.enums_list.append({'source': source, 'template': '', 'target': target, 'file-head': file_head, 'file-prod': file_prod, 'file-tail': file_tail, 'enum-prod': enum_prod, 'value-head': value_head, 'value-prod': value_prod, 'value-tail': value_tail, 'comments': comments}) @before_method('process_source') def process_enums(self): """ Processes the enum files stored in the attribute *enum_list* to create :py:class:`waflib.Tools.glib2.glib_mkenums` instances. """ for enum in getattr(self, 'enums_list', []): task = self.create_task('glib_mkenums') env = task.env inputs = [] # process the source source_list = self.to_list(enum['source']) if not source_list: raise Errors.WafError('missing source ' + str(enum)) source_list = [self.path.find_resource(k) for k in source_list] inputs += source_list env.GLIB_MKENUMS_SOURCE = [k.abspath() for k in source_list] # find the target if not enum['target']: raise Errors.WafError('missing target ' + str(enum)) tgt_node = self.path.find_or_declare(enum['target']) if tgt_node.name.endswith('.c'): self.source.append(tgt_node) env.GLIB_MKENUMS_TARGET = tgt_node.abspath() options = [] if enum['template']: # template, if provided template_node = self.path.find_resource(enum['template']) options.append('--template %s' % (template_node.abspath())) inputs.append(template_node) params = {'file-head' : '--fhead', 'file-prod' : '--fprod', 'file-tail' : '--ftail', 'enum-prod' : '--eprod', 'value-head' : '--vhead', 'value-prod' : '--vprod', 'value-tail' : '--vtail', 'comments': '--comments'} for param, option in params.items(): if enum[param]: options.append('%s %r' % (option, enum[param])) env.GLIB_MKENUMS_OPTIONS = ' '.join(options) # update the task instance task.set_inputs(inputs) task.set_outputs(tgt_node) class glib_mkenums(Task.Task): """ Processes enum files """ run_str = '${GLIB_MKENUMS} ${GLIB_MKENUMS_OPTIONS} ${GLIB_MKENUMS_SOURCE} > ${GLIB_MKENUMS_TARGET}' color = 'PINK' ext_out = ['.h'] ######################################### gsettings @taskgen_method def add_settings_schemas(self, filename_list): """ Adds settings files to process to *settings_schema_files* :param filename_list: files :type filename_list: list of string """ if not hasattr(self, 'settings_schema_files'): self.settings_schema_files = [] if not isinstance(filename_list, list): 
filename_list = [filename_list] self.settings_schema_files.extend(filename_list) @taskgen_method def add_settings_enums(self, namespace, filename_list): """ Called only once by task generator to set the enums namespace. :param namespace: namespace :type namespace: string :param filename_list: enum files to process :type filename_list: file list """ if hasattr(self, 'settings_enum_namespace'): raise Errors.WafError("Tried to add gsettings enums to %r more than once" % self.name) self.settings_enum_namespace = namespace if not isinstance(filename_list, list): filename_list = [filename_list] self.settings_enum_files = filename_list @feature('glib2') def process_settings(self): """ Processes the schema files in *settings_schema_files* to create :py:class:`waflib.Tools.glib2.glib_mkenums` instances. The same files are validated through :py:class:`waflib.Tools.glib2.glib_validate_schema` tasks. """ enums_tgt_node = [] install_files = [] settings_schema_files = getattr(self, 'settings_schema_files', []) if settings_schema_files and not self.env.GLIB_COMPILE_SCHEMAS: raise Errors.WafError ("Unable to process GSettings schemas - glib-compile-schemas was not found during configure") # 1. process gsettings_enum_files (generate .enums.xml) # if hasattr(self, 'settings_enum_files'): enums_task = self.create_task('glib_mkenums') source_list = self.settings_enum_files source_list = [self.path.find_resource(k) for k in source_list] enums_task.set_inputs(source_list) enums_task.env.GLIB_MKENUMS_SOURCE = [k.abspath() for k in source_list] target = self.settings_enum_namespace + '.enums.xml' tgt_node = self.path.find_or_declare(target) enums_task.set_outputs(tgt_node) enums_task.env.GLIB_MKENUMS_TARGET = tgt_node.abspath() enums_tgt_node = [tgt_node] install_files.append(tgt_node) options = '--comments "" --fhead "" --vhead " <@type@ id=\\"%s.@EnumName@\\">" --vprod " " --vtail " " --ftail "" ' % (self.settings_enum_namespace) enums_task.env.GLIB_MKENUMS_OPTIONS = options # 2. process gsettings_schema_files (validate .gschema.xml files) # for schema in settings_schema_files: schema_task = self.create_task ('glib_validate_schema') schema_node = self.path.find_resource(schema) if not schema_node: raise Errors.WafError("Cannot find the schema file %r" % schema) install_files.append(schema_node) source_list = enums_tgt_node + [schema_node] schema_task.set_inputs (source_list) schema_task.env.GLIB_COMPILE_SCHEMAS_OPTIONS = [("--schema-file=" + k.abspath()) for k in source_list] target_node = schema_node.change_ext('.xml.valid') schema_task.set_outputs (target_node) schema_task.env.GLIB_VALIDATE_SCHEMA_OUTPUT = target_node.abspath() # 3. 
schemas install task def compile_schemas_callback(bld): if not bld.is_install: return compile_schemas = Utils.to_list(bld.env.GLIB_COMPILE_SCHEMAS) destdir = Options.options.destdir paths = bld._compile_schemas_registered if destdir: paths = (os.path.join(destdir, path.lstrip(os.sep)) for path in paths) for path in paths: Logs.pprint('YELLOW', 'Updating GSettings schema cache %r' % path) if self.bld.exec_command(compile_schemas + [path]): Logs.warn('Could not update GSettings schema cache %r' % path) if self.bld.is_install: schemadir = self.env.GSETTINGSSCHEMADIR if not schemadir: raise Errors.WafError ('GSETTINGSSCHEMADIR not defined (should have been set up automatically during configure)') if install_files: self.add_install_files(install_to=schemadir, install_from=install_files) registered_schemas = getattr(self.bld, '_compile_schemas_registered', None) if not registered_schemas: registered_schemas = self.bld._compile_schemas_registered = set() self.bld.add_post_fun(compile_schemas_callback) registered_schemas.add(schemadir) class glib_validate_schema(Task.Task): """ Validates schema files """ run_str = 'rm -f ${GLIB_VALIDATE_SCHEMA_OUTPUT} && ${GLIB_COMPILE_SCHEMAS} --dry-run ${GLIB_COMPILE_SCHEMAS_OPTIONS} && touch ${GLIB_VALIDATE_SCHEMA_OUTPUT}' color = 'PINK' ################## gresource @extension('.gresource.xml') def process_gresource_source(self, node): """ Creates tasks that turn ``.gresource.xml`` files to C code """ if not self.env.GLIB_COMPILE_RESOURCES: raise Errors.WafError ("Unable to process GResource file - glib-compile-resources was not found during configure") if 'gresource' in self.features: return h_node = node.change_ext('_xml.h') c_node = node.change_ext('_xml.c') self.create_task('glib_gresource_source', node, [h_node, c_node]) self.source.append(c_node) @feature('gresource') def process_gresource_bundle(self): """ Creates tasks to turn ``.gresource`` files from ``.gresource.xml`` files:: def build(bld): bld( features='gresource', source=['resources1.gresource.xml', 'resources2.gresource.xml'], install_path='${LIBDIR}/${PACKAGE}' ) :param source: XML files to process :type source: list of string :param install_path: installation path :type install_path: string """ for i in self.to_list(self.source): node = self.path.find_resource(i) task = self.create_task('glib_gresource_bundle', node, node.change_ext('')) inst_to = getattr(self, 'install_path', None) if inst_to: self.add_install_files(install_to=inst_to, install_from=task.outputs) class glib_gresource_base(Task.Task): """ Base class for gresource based tasks """ color = 'BLUE' base_cmd = '${GLIB_COMPILE_RESOURCES} --sourcedir=${SRC[0].parent.srcpath()} --sourcedir=${SRC[0].bld_dir()}' def scan(self): """ Scans gresource dependencies through ``glib-compile-resources --generate-dependencies command`` """ bld = self.generator.bld kw = {} kw['cwd'] = self.get_cwd() kw['quiet'] = Context.BOTH cmd = Utils.subst_vars('${GLIB_COMPILE_RESOURCES} --sourcedir=%s --sourcedir=%s --generate-dependencies %s' % ( self.inputs[0].parent.srcpath(), self.inputs[0].bld_dir(), self.inputs[0].bldpath() ), self.env) output = bld.cmd_and_log(cmd, **kw) nodes = [] names = [] for dep in output.splitlines(): if dep: node = bld.bldnode.find_node(dep) if node: nodes.append(node) else: names.append(dep) return (nodes, names) class glib_gresource_source(glib_gresource_base): """ Task to generate C source code (.h and .c files) from a gresource.xml file """ vars = ['GLIB_COMPILE_RESOURCES'] fun_h = 
Task.compile_fun_shell(glib_gresource_base.base_cmd + ' --target=${TGT[0].abspath()} --generate-header ${SRC}') fun_c = Task.compile_fun_shell(glib_gresource_base.base_cmd + ' --target=${TGT[1].abspath()} --generate-source ${SRC}') ext_out = ['.h'] def run(self): return self.fun_h[0](self) or self.fun_c[0](self) class glib_gresource_bundle(glib_gresource_base): """ Task to generate a .gresource binary file from a gresource.xml file """ run_str = glib_gresource_base.base_cmd + ' --target=${TGT} ${SRC}' shell = True # temporary workaround for #795 @conf def find_glib_genmarshal(conf): conf.find_program('glib-genmarshal', var='GLIB_GENMARSHAL') @conf def find_glib_mkenums(conf): if not conf.env.PERL: conf.find_program('perl', var='PERL') conf.find_program('glib-mkenums', interpreter='PERL', var='GLIB_MKENUMS') @conf def find_glib_compile_schemas(conf): # when cross-compiling, gsettings.m4 locates the program with the following: # pkg-config --variable glib_compile_schemas gio-2.0 conf.find_program('glib-compile-schemas', var='GLIB_COMPILE_SCHEMAS') def getstr(varname): return getattr(Options.options, varname, getattr(conf.env,varname, '')) gsettingsschemadir = getstr('GSETTINGSSCHEMADIR') if not gsettingsschemadir: datadir = getstr('DATADIR') if not datadir: prefix = conf.env.PREFIX datadir = os.path.join(prefix, 'share') gsettingsschemadir = os.path.join(datadir, 'glib-2.0', 'schemas') conf.env.GSETTINGSSCHEMADIR = gsettingsschemadir @conf def find_glib_compile_resources(conf): conf.find_program('glib-compile-resources', var='GLIB_COMPILE_RESOURCES') def configure(conf): """ Finds the following programs: * *glib-genmarshal* and set *GLIB_GENMARSHAL* * *glib-mkenums* and set *GLIB_MKENUMS* * *glib-compile-schemas* and set *GLIB_COMPILE_SCHEMAS* (not mandatory) * *glib-compile-resources* and set *GLIB_COMPILE_RESOURCES* (not mandatory) """ conf.find_glib_genmarshal() conf.find_glib_mkenums() conf.find_glib_compile_schemas(mandatory=False) conf.find_glib_compile_resources(mandatory=False) def options(opt): """ Adds the ``--gsettingsschemadir`` command-line option """ gr = opt.add_option_group('Installation directories') gr.add_option('--gsettingsschemadir', help='GSettings schema location [DATADIR/glib-2.0/schemas]', default='', dest='GSETTINGSSCHEMADIR') tdb-1.4.2/third_party/waf/waflib/Tools/gnu_dirs.py0000660000000000000000000001207613444661622022077 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Ali Sabil, 2007 """ Sets various standard variables such as INCLUDEDIR. SBINDIR and others. 
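For instance, with the default ``PREFIX`` of ``/usr/local`` the derived
defaults work out roughly as follows (illustrative sketch)::

	BINDIR     = /usr/local/bin           # ${EXEC_PREFIX}/bin
	SYSCONFDIR = /usr/local/etc           # ${PREFIX}/etc
	LOCALEDIR  = /usr/local/share/locale  # ${DATAROOTDIR}/locale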
To use this module just call:: opt.load('gnu_dirs') and:: conf.load('gnu_dirs') Add options for the standard GNU directories, this tool will add the options found in autotools, and will update the environment with the following installation variables: ============== ========================================= ======================= Variable Description Default Value ============== ========================================= ======================= PREFIX installation prefix /usr/local EXEC_PREFIX installation prefix for binaries PREFIX BINDIR user commands EXEC_PREFIX/bin SBINDIR system binaries EXEC_PREFIX/sbin LIBEXECDIR program-specific binaries EXEC_PREFIX/libexec SYSCONFDIR host-specific configuration PREFIX/etc SHAREDSTATEDIR architecture-independent variable data PREFIX/com LOCALSTATEDIR variable data PREFIX/var LIBDIR object code libraries EXEC_PREFIX/lib INCLUDEDIR header files PREFIX/include OLDINCLUDEDIR header files for non-GCC compilers /usr/include DATAROOTDIR architecture-independent data root PREFIX/share DATADIR architecture-independent data DATAROOTDIR INFODIR GNU "info" documentation DATAROOTDIR/info LOCALEDIR locale-dependent data DATAROOTDIR/locale MANDIR manual pages DATAROOTDIR/man DOCDIR documentation root DATAROOTDIR/doc/APPNAME HTMLDIR HTML documentation DOCDIR DVIDIR DVI documentation DOCDIR PDFDIR PDF documentation DOCDIR PSDIR PostScript documentation DOCDIR ============== ========================================= ======================= """ import os, re from waflib import Utils, Options, Context gnuopts = ''' bindir, user commands, ${EXEC_PREFIX}/bin sbindir, system binaries, ${EXEC_PREFIX}/sbin libexecdir, program-specific binaries, ${EXEC_PREFIX}/libexec sysconfdir, host-specific configuration, ${PREFIX}/etc sharedstatedir, architecture-independent variable data, ${PREFIX}/com localstatedir, variable data, ${PREFIX}/var libdir, object code libraries, ${EXEC_PREFIX}/lib%s includedir, header files, ${PREFIX}/include oldincludedir, header files for non-GCC compilers, /usr/include datarootdir, architecture-independent data root, ${PREFIX}/share datadir, architecture-independent data, ${DATAROOTDIR} infodir, GNU "info" documentation, ${DATAROOTDIR}/info localedir, locale-dependent data, ${DATAROOTDIR}/locale mandir, manual pages, ${DATAROOTDIR}/man docdir, documentation root, ${DATAROOTDIR}/doc/${PACKAGE} htmldir, HTML documentation, ${DOCDIR} dvidir, DVI documentation, ${DOCDIR} pdfdir, PDF documentation, ${DOCDIR} psdir, PostScript documentation, ${DOCDIR} ''' % Utils.lib64() _options = [x.split(', ') for x in gnuopts.splitlines() if x] def configure(conf): """ Reads the command-line options to set lots of variables in *conf.env*. The variables BINDIR and LIBDIR will be overwritten. 
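	A hedged sketch of the substitution performed below::

		# given PREFIX='/usr' and no --libdir option:
		#   LIBDIR <- '${EXEC_PREFIX}/lib' -> '/usr/lib'
		# defaults may reference other variables (EXEC_PREFIX, DATAROOTDIR,
		# DOCDIR), so resolution is retried over several passes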
""" def get_param(varname, default): return getattr(Options.options, varname, '') or default env = conf.env env.LIBDIR = env.BINDIR = [] env.EXEC_PREFIX = get_param('EXEC_PREFIX', env.PREFIX) env.PACKAGE = getattr(Context.g_module, 'APPNAME', None) or env.PACKAGE complete = False iter = 0 while not complete and iter < len(_options) + 1: iter += 1 complete = True for name, help, default in _options: name = name.upper() if not env[name]: try: env[name] = Utils.subst_vars(get_param(name, default).replace('/', os.sep), env) except TypeError: complete = False if not complete: lst = [x for x, _, _ in _options if not env[x.upper()]] raise conf.errors.WafError('Variable substitution failure %r' % lst) def options(opt): """ Adds lots of command-line options, for example:: --exec-prefix: EXEC_PREFIX """ inst_dir = opt.add_option_group('Installation prefix', 'By default, "waf install" will put the files in\ "/usr/local/bin", "/usr/local/lib" etc. An installation prefix other\ than "/usr/local" can be given using "--prefix", for example "--prefix=$HOME"') for k in ('--prefix', '--destdir'): option = opt.parser.get_option(k) if option: opt.parser.remove_option(k) inst_dir.add_option(option) inst_dir.add_option('--exec-prefix', help = 'installation prefix for binaries [PREFIX]', default = '', dest = 'EXEC_PREFIX') dirs_options = opt.add_option_group('Installation directories') for name, help, default in _options: option_name = '--' + name str_default = default str_help = '%s [%s]' % (help, re.sub(r'\$\{([^}]+)\}', r'\1', str_default)) dirs_options.add_option(option_name, help=str_help, default='', dest=name.upper()) tdb-1.4.2/third_party/waf/waflib/Tools/gxx.py0000660000000000000000000000774013444661622021075 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) # Ralf Habacker, 2006 (rh) # Yinon Ehrlich, 2009 """ g++/llvm detection. """ from waflib.Tools import ccroot, ar from waflib.Configure import conf @conf def find_gxx(conf): """ Finds the program g++, and if present, try to detect its version number """ cxx = conf.find_program(['g++', 'c++'], var='CXX') conf.get_cc_version(cxx, gcc=True) conf.env.CXX_NAME = 'gcc' @conf def gxx_common_flags(conf): """ Common flags for g++ on nearly all platforms """ v = conf.env v.CXX_SRC_F = [] v.CXX_TGT_F = ['-c', '-o'] if not v.LINK_CXX: v.LINK_CXX = v.CXX v.CXXLNK_SRC_F = [] v.CXXLNK_TGT_F = ['-o'] v.CPPPATH_ST = '-I%s' v.DEFINES_ST = '-D%s' v.LIB_ST = '-l%s' # template for adding libs v.LIBPATH_ST = '-L%s' # template for adding libpaths v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-L%s' v.RPATH_ST = '-Wl,-rpath,%s' v.SONAME_ST = '-Wl,-h,%s' v.SHLIB_MARKER = '-Wl,-Bdynamic' v.STLIB_MARKER = '-Wl,-Bstatic' v.cxxprogram_PATTERN = '%s' v.CXXFLAGS_cxxshlib = ['-fPIC'] v.LINKFLAGS_cxxshlib = ['-shared'] v.cxxshlib_PATTERN = 'lib%s.so' v.LINKFLAGS_cxxstlib = ['-Wl,-Bstatic'] v.cxxstlib_PATTERN = 'lib%s.a' v.LINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup'] v.CXXFLAGS_MACBUNDLE = ['-fPIC'] v.macbundle_PATTERN = '%s.bundle' @conf def gxx_modifier_win32(conf): """Configuration flags for executing gcc on Windows""" v = conf.env v.cxxprogram_PATTERN = '%s.exe' v.cxxshlib_PATTERN = '%s.dll' v.implib_PATTERN = '%s.dll.a' v.IMPLIB_ST = '-Wl,--out-implib,%s' v.CXXFLAGS_cxxshlib = [] # Auto-import is enabled by default even without this option, # but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages # that the linker emits otherwise. 
v.append_value('LINKFLAGS', ['-Wl,--enable-auto-import']) @conf def gxx_modifier_cygwin(conf): """Configuration flags for executing g++ on Cygwin""" gxx_modifier_win32(conf) v = conf.env v.cxxshlib_PATTERN = 'cyg%s.dll' v.append_value('LINKFLAGS_cxxshlib', ['-Wl,--enable-auto-image-base']) v.CXXFLAGS_cxxshlib = [] @conf def gxx_modifier_darwin(conf): """Configuration flags for executing g++ on MacOS""" v = conf.env v.CXXFLAGS_cxxshlib = ['-fPIC'] v.LINKFLAGS_cxxshlib = ['-dynamiclib'] v.cxxshlib_PATTERN = 'lib%s.dylib' v.FRAMEWORKPATH_ST = '-F%s' v.FRAMEWORK_ST = ['-framework'] v.ARCH_ST = ['-arch'] v.LINKFLAGS_cxxstlib = [] v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.SONAME_ST = [] @conf def gxx_modifier_aix(conf): """Configuration flags for executing g++ on AIX""" v = conf.env v.LINKFLAGS_cxxprogram= ['-Wl,-brtl'] v.LINKFLAGS_cxxshlib = ['-shared', '-Wl,-brtl,-bexpfull'] v.SHLIB_MARKER = [] @conf def gxx_modifier_hpux(conf): v = conf.env v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.CFLAGS_cxxshlib = ['-fPIC','-DPIC'] v.cxxshlib_PATTERN = 'lib%s.sl' @conf def gxx_modifier_openbsd(conf): conf.env.SONAME_ST = [] @conf def gcc_modifier_osf1V(conf): v = conf.env v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.SONAME_ST = [] @conf def gxx_modifier_platform(conf): """Execute platform-specific functions based on *gxx_modifier_+NAME*""" # * set configurations specific for a platform. # * the destination platform is detected automatically by looking at the macros the compiler predefines, # and if it's not recognised, it fallbacks to sys.platform. gxx_modifier_func = getattr(conf, 'gxx_modifier_' + conf.env.DEST_OS, None) if gxx_modifier_func: gxx_modifier_func() def configure(conf): """ Configuration for g++ """ conf.find_gxx() conf.find_ar() conf.gxx_common_flags() conf.gxx_modifier_platform() conf.cxx_load_tools() conf.cxx_add_flags() conf.link_add_flags() conf.check_gcc_o_space('cxx') tdb-1.4.2/third_party/waf/waflib/Tools/icc.py0000660000000000000000000000113413444661622021014 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Stian Selnes 2008 # Thomas Nagy 2009-2018 (ita) """ Detects the Intel C compiler """ import sys from waflib.Tools import ccroot, ar, gcc from waflib.Configure import conf @conf def find_icc(conf): """ Finds the program icc and execute it to ensure it really is icc """ cc = conf.find_program(['icc', 'ICL'], var='CC') conf.get_cc_version(cc, icc=True) conf.env.CC_NAME = 'icc' def configure(conf): conf.find_icc() conf.find_ar() conf.gcc_common_flags() conf.gcc_modifier_platform() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() tdb-1.4.2/third_party/waf/waflib/Tools/icpc.py0000660000000000000000000000111613444661622021174 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy 2009-2018 (ita) """ Detects the Intel C++ compiler """ import sys from waflib.Tools import ccroot, ar, gxx from waflib.Configure import conf @conf def find_icpc(conf): """ Finds the program icpc, and execute it to ensure it really is icpc """ cxx = conf.find_program('icpc', var='CXX') conf.get_cc_version(cxx, icc=True) conf.env.CXX_NAME = 'icc' def configure(conf): conf.find_icpc() conf.find_ar() conf.gxx_common_flags() conf.gxx_modifier_platform() conf.cxx_load_tools() conf.cxx_add_flags() conf.link_add_flags() tdb-1.4.2/third_party/waf/waflib/Tools/ifort.py0000660000000000000000000003027113527011455021400 0ustar rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # DC 2008 # Thomas Nagy 2016-2018 (ita) import os, re, traceback from waflib import Utils, Logs, Errors from waflib.Tools import fc, fc_config, fc_scan, ar, ccroot from waflib.Configure import conf from waflib.TaskGen import after_method, feature @conf def find_ifort(conf): fc = conf.find_program('ifort', var='FC') conf.get_ifort_version(fc) conf.env.FC_NAME = 'IFORT' @conf def ifort_modifier_win32(self): v = self.env v.IFORT_WIN32 = True v.FCSTLIB_MARKER = '' v.FCSHLIB_MARKER = '' v.FCLIB_ST = v.FCSTLIB_ST = '%s.lib' v.FCLIBPATH_ST = v.STLIBPATH_ST = '/LIBPATH:%s' v.FCINCPATH_ST = '/I%s' v.FCDEFINES_ST = '/D%s' v.fcprogram_PATTERN = v.fcprogram_test_PATTERN = '%s.exe' v.fcshlib_PATTERN = '%s.dll' v.fcstlib_PATTERN = v.implib_PATTERN = '%s.lib' v.FCLNK_TGT_F = '/out:' v.FC_TGT_F = ['/c', '/o', ''] v.FCFLAGS_fcshlib = '' v.LINKFLAGS_fcshlib = '/DLL' v.AR_TGT_F = '/out:' v.IMPLIB_ST = '/IMPLIB:%s' v.append_value('LINKFLAGS', '/subsystem:console') if v.IFORT_MANIFEST: v.append_value('LINKFLAGS', ['/MANIFEST']) @conf def ifort_modifier_darwin(conf): fc_config.fortran_modifier_darwin(conf) @conf def ifort_modifier_platform(conf): dest_os = conf.env.DEST_OS or Utils.unversioned_sys_platform() ifort_modifier_func = getattr(conf, 'ifort_modifier_' + dest_os, None) if ifort_modifier_func: ifort_modifier_func() @conf def get_ifort_version(conf, fc): """ Detects the compiler version and sets ``conf.env.FC_VERSION`` """ version_re = re.compile(r"\bIntel\b.*\bVersion\s*(?P\d*)\.(?P\d*)",re.I).search if Utils.is_win32: cmd = fc else: cmd = fc + ['-logo'] out, err = fc_config.getoutput(conf, cmd, stdin=False) match = version_re(out) or version_re(err) if not match: conf.fatal('cannot determine ifort version.') k = match.groupdict() conf.env.FC_VERSION = (k['major'], k['minor']) def configure(conf): """ Detects the Intel Fortran compilers """ if Utils.is_win32: compiler, version, path, includes, libdirs, arch = conf.detect_ifort() v = conf.env v.DEST_CPU = arch v.PATH = path v.INCLUDES = includes v.LIBPATH = libdirs v.MSVC_COMPILER = compiler try: v.MSVC_VERSION = float(version) except ValueError: v.MSVC_VERSION = float(version[:-3]) conf.find_ifort_win32() conf.ifort_modifier_win32() else: conf.find_ifort() conf.find_program('xiar', var='AR') conf.find_ar() conf.fc_flags() conf.fc_add_flags() conf.ifort_modifier_platform() all_ifort_platforms = [ ('intel64', 'amd64'), ('em64t', 'amd64'), ('ia32', 'x86'), ('Itanium', 'ia64')] """List of icl platforms""" @conf def gather_ifort_versions(conf, versions): """ List compiler versions by looking up registry keys """ version_pattern = re.compile(r'^...?.?\....?.?') try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Intel\\Compilers\\Fortran') except OSError: try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Intel\\Compilers\\Fortran') except OSError: return index = 0 while 1: try: version = Utils.winreg.EnumKey(all_versions, index) except OSError: break index += 1 if not version_pattern.match(version): continue targets = {} for target,arch in all_ifort_platforms: if target=='intel64': targetDir='EM64T_NATIVE' else: targetDir=target try: Utils.winreg.OpenKey(all_versions,version+'\\'+targetDir) icl_version=Utils.winreg.OpenKey(all_versions,version) path,type=Utils.winreg.QueryValueEx(icl_version,'ProductDir') except OSError: pass else: batch_file=os.path.join(path,'bin','ifortvars.bat') if os.path.isfile(batch_file): targets[target] = 
target_compiler(conf, 'intel', arch, version, target, batch_file) for target,arch in all_ifort_platforms: try: icl_version = Utils.winreg.OpenKey(all_versions, version+'\\'+target) path,type = Utils.winreg.QueryValueEx(icl_version,'ProductDir') except OSError: continue else: batch_file=os.path.join(path,'bin','ifortvars.bat') if os.path.isfile(batch_file): targets[target] = target_compiler(conf, 'intel', arch, version, target, batch_file) major = version[0:2] versions['intel ' + major] = targets @conf def setup_ifort(conf, versiondict): """ Checks installed compilers and targets and returns the first combination from the user's options, env, or the global supported lists that checks. :param versiondict: dict(platform -> dict(architecture -> configuration)) :type versiondict: dict(string -> dict(string -> target_compiler) :return: the compiler, revision, path, include dirs, library paths and target architecture :rtype: tuple of strings """ platforms = Utils.to_list(conf.env.MSVC_TARGETS) or [i for i,j in all_ifort_platforms] desired_versions = conf.env.MSVC_VERSIONS or list(reversed(list(versiondict.keys()))) for version in desired_versions: try: targets = versiondict[version] except KeyError: continue for arch in platforms: try: cfg = targets[arch] except KeyError: continue cfg.evaluate() if cfg.is_valid: compiler,revision = version.rsplit(' ', 1) return compiler,revision,cfg.bindirs,cfg.incdirs,cfg.libdirs,cfg.cpu conf.fatal('ifort: Impossible to find a valid architecture for building %r - %r' % (desired_versions, list(versiondict.keys()))) @conf def get_ifort_version_win32(conf, compiler, version, target, vcvars): # FIXME hack try: conf.msvc_cnt += 1 except AttributeError: conf.msvc_cnt = 1 batfile = conf.bldnode.make_node('waf-print-msvc-%d.bat' % conf.msvc_cnt) batfile.write("""@echo off set INCLUDE= set LIB= call "%s" %s echo PATH=%%PATH%% echo INCLUDE=%%INCLUDE%% echo LIB=%%LIB%%;%%LIBPATH%% """ % (vcvars,target)) sout = conf.cmd_and_log(['cmd.exe', '/E:on', '/V:on', '/C', batfile.abspath()]) batfile.delete() lines = sout.splitlines() if not lines[0]: lines.pop(0) MSVC_PATH = MSVC_INCDIR = MSVC_LIBDIR = None for line in lines: if line.startswith('PATH='): path = line[5:] MSVC_PATH = path.split(';') elif line.startswith('INCLUDE='): MSVC_INCDIR = [i for i in line[8:].split(';') if i] elif line.startswith('LIB='): MSVC_LIBDIR = [i for i in line[4:].split(';') if i] if None in (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR): conf.fatal('ifort: Could not find a valid architecture for building (get_ifort_version_win32)') # Check if the compiler is usable at all. # The detection may return 64-bit versions even on 32-bit systems, and these would fail to run. env = dict(os.environ) env.update(PATH = path) compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler) fc = conf.find_program(compiler_name, path_list=MSVC_PATH) # delete CL if exists. because it could contain parameters which can change cl's behaviour rather catastrophically. 
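	# Standalone sketch of the same hygiene step, assuming a plain dict copy of
	# the process environment:
	#
	#   env = dict(os.environ)
	#   env.pop('CL', None)  # cl/icl read extra options from the CL variable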
if 'CL' in env: del(env['CL']) try: conf.cmd_and_log(fc + ['/help'], env=env) except UnicodeError: st = traceback.format_exc() if conf.logger: conf.logger.error(st) conf.fatal('ifort: Unicode error - check the code page?') except Exception as e: Logs.debug('ifort: get_ifort_version: %r %r %r -> failure %s', compiler, version, target, str(e)) conf.fatal('ifort: cannot run the compiler in get_ifort_version (run with -v to display errors)') else: Logs.debug('ifort: get_ifort_version: %r %r %r -> OK', compiler, version, target) finally: conf.env[compiler_name] = '' return (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR) class target_compiler(object): """ Wraps a compiler configuration; call evaluate() to determine whether the configuration is usable. """ def __init__(self, ctx, compiler, cpu, version, bat_target, bat, callback=None): """ :param ctx: configuration context to use to eventually get the version environment :param compiler: compiler name :param cpu: target cpu :param version: compiler version number :param bat_target: ? :param bat: path to the batch file to run :param callback: optional function to take the realized environment variables tup and map it (e.g. to combine other constant paths) """ self.conf = ctx self.name = None self.is_valid = False self.is_done = False self.compiler = compiler self.cpu = cpu self.version = version self.bat_target = bat_target self.bat = bat self.callback = callback def evaluate(self): if self.is_done: return self.is_done = True try: vs = self.conf.get_ifort_version_win32(self.compiler, self.version, self.bat_target, self.bat) except Errors.ConfigurationError: self.is_valid = False return if self.callback: vs = self.callback(self, vs) self.is_valid = True (self.bindirs, self.incdirs, self.libdirs) = vs def __str__(self): return str((self.bindirs, self.incdirs, self.libdirs)) def __repr__(self): return repr((self.bindirs, self.incdirs, self.libdirs)) @conf def detect_ifort(self): return self.setup_ifort(self.get_ifort_versions(False)) @conf def get_ifort_versions(self, eval_and_save=True): """ :return: platforms to compiler configurations :rtype: dict """ dct = {} self.gather_ifort_versions(dct) return dct def _get_prog_names(self, compiler): if compiler=='intel': compiler_name = 'ifort' linker_name = 'XILINK' lib_name = 'XILIB' else: # assumes CL.exe compiler_name = 'CL' linker_name = 'LINK' lib_name = 'LIB' return compiler_name, linker_name, lib_name @conf def find_ifort_win32(conf): # the autodetection is supposed to be performed before entering in this method v = conf.env path = v.PATH compiler = v.MSVC_COMPILER version = v.MSVC_VERSION compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler) v.IFORT_MANIFEST = (compiler == 'intel' and version >= 11) # compiler fc = conf.find_program(compiler_name, var='FC', path_list=path) # before setting anything, check if the compiler is really intel fortran env = dict(conf.environ) if path: env.update(PATH = ';'.join(path)) if not conf.cmd_and_log(fc + ['/nologo', '/help'], env=env): conf.fatal('not intel fortran compiler could not be identified') v.FC_NAME = 'IFORT' if not v.LINK_FC: conf.find_program(linker_name, var='LINK_FC', path_list=path, mandatory=True) if not v.AR: conf.find_program(lib_name, path_list=path, var='AR', mandatory=True) v.ARFLAGS = ['/nologo'] # manifest tool. Not required for VS 2003 and below. 
Must have for VS 2005 and later if v.IFORT_MANIFEST: conf.find_program('MT', path_list=path, var='MT') v.MTFLAGS = ['/nologo'] try: conf.load('winres') except Errors.WafError: Logs.warn('Resource compiler not found. Compiling resource file is disabled') ####################################################################################################### ##### conf above, build below @after_method('apply_link') @feature('fc') def apply_flags_ifort(self): """ Adds additional flags implied by msvc, such as subsystems and pdb files:: def build(bld): bld.stlib(source='main.c', target='bar', subsystem='gruik') """ if not self.env.IFORT_WIN32 or not getattr(self, 'link_task', None): return is_static = isinstance(self.link_task, ccroot.stlink_task) subsystem = getattr(self, 'subsystem', '') if subsystem: subsystem = '/subsystem:%s' % subsystem flags = is_static and 'ARFLAGS' or 'LINKFLAGS' self.env.append_value(flags, subsystem) if not is_static: for f in self.env.LINKFLAGS: d = f.lower() if d[1:] == 'debug': pdbnode = self.link_task.outputs[0].change_ext('.pdb') self.link_task.outputs.append(pdbnode) if getattr(self, 'install_task', None): self.pdb_install_task = self.add_install_files(install_to=self.install_task.install_to, install_from=pdbnode) break @feature('fcprogram', 'fcshlib', 'fcprogram_test') @after_method('apply_link') def apply_manifest_ifort(self): """ Enables manifest embedding in Fortran DLLs when using ifort on Windows See: http://msdn2.microsoft.com/en-us/library/ms235542(VS.80).aspx """ if self.env.IFORT_WIN32 and getattr(self, 'link_task', None): # it seems ifort.exe cannot be called for linking self.link_task.env.FC = self.env.LINK_FC if self.env.IFORT_WIN32 and self.env.IFORT_MANIFEST and getattr(self, 'link_task', None): out_node = self.link_task.outputs[0] man_node = out_node.parent.find_or_declare(out_node.name + '.manifest') self.link_task.outputs.append(man_node) self.env.DO_MANIFEST = True tdb-1.4.2/third_party/waf/waflib/Tools/intltool.py0000660000000000000000000001520013444661622022121 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) """ Support for translation tools such as msgfmt and intltool Usage:: def configure(conf): conf.load('gnu_dirs intltool') def build(bld): # process the .po files into .gmo files, and install them in LOCALEDIR bld(features='intltool_po', appname='myapp', podir='po', install_path="${LOCALEDIR}") # process an input file, substituting the translations from the po dir bld( features = "intltool_in", podir = "../po", style = "desktop", flags = ["-u"], source = 'kupfer.desktop.in', install_path = "${DATADIR}/applications", ) Usage of the :py:mod:`waflib.Tools.gnu_dirs` is recommended, but not obligatory. 
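Under the hood the ``intltool_po`` feature compiles each catalog with msgfmt,
roughly equivalent to the following (hedged sketch, hypothetical paths)::

	msgfmt -o build/po/fr.mo po/fr.po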
""" from __future__ import with_statement import os, re from waflib import Context, Task, Utils, Logs import waflib.Tools.ccroot from waflib.TaskGen import feature, before_method, taskgen_method from waflib.Logs import error from waflib.Configure import conf _style_flags = { 'ba': '-b', 'desktop': '-d', 'keys': '-k', 'quoted': '--quoted-style', 'quotedxml': '--quotedxml-style', 'rfc822deb': '-r', 'schemas': '-s', 'xml': '-x', } @taskgen_method def ensure_localedir(self): """ Expands LOCALEDIR from DATAROOTDIR/locale if possible, or falls back to PREFIX/share/locale """ # use the tool gnu_dirs to provide options to define this if not self.env.LOCALEDIR: if self.env.DATAROOTDIR: self.env.LOCALEDIR = os.path.join(self.env.DATAROOTDIR, 'locale') else: self.env.LOCALEDIR = os.path.join(self.env.PREFIX, 'share', 'locale') @before_method('process_source') @feature('intltool_in') def apply_intltool_in_f(self): """ Creates tasks to translate files by intltool-merge:: def build(bld): bld( features = "intltool_in", podir = "../po", style = "desktop", flags = ["-u"], source = 'kupfer.desktop.in', install_path = "${DATADIR}/applications", ) :param podir: location of the .po files :type podir: string :param source: source files to process :type source: list of string :param style: the intltool-merge mode of operation, can be one of the following values: ``ba``, ``desktop``, ``keys``, ``quoted``, ``quotedxml``, ``rfc822deb``, ``schemas`` and ``xml``. See the ``intltool-merge`` man page for more information about supported modes of operation. :type style: string :param flags: compilation flags ("-quc" by default) :type flags: list of string :param install_path: installation path :type install_path: string """ try: self.meths.remove('process_source') except ValueError: pass self.ensure_localedir() podir = getattr(self, 'podir', '.') podirnode = self.path.find_dir(podir) if not podirnode: error("could not find the podir %r" % podir) return cache = getattr(self, 'intlcache', '.intlcache') self.env.INTLCACHE = [os.path.join(str(self.path.get_bld()), podir, cache)] self.env.INTLPODIR = podirnode.bldpath() self.env.append_value('INTLFLAGS', getattr(self, 'flags', self.env.INTLFLAGS_DEFAULT)) if '-c' in self.env.INTLFLAGS: self.bld.fatal('Redundant -c flag in intltool task %r' % self) style = getattr(self, 'style', None) if style: try: style_flag = _style_flags[style] except KeyError: self.bld.fatal('intltool_in style "%s" is not valid' % style) self.env.append_unique('INTLFLAGS', [style_flag]) for i in self.to_list(self.source): node = self.path.find_resource(i) task = self.create_task('intltool', node, node.change_ext('')) inst = getattr(self, 'install_path', None) if inst: self.add_install_files(install_to=inst, install_from=task.outputs) @feature('intltool_po') def apply_intltool_po(self): """ Creates tasks to process po files:: def build(bld): bld(features='intltool_po', appname='myapp', podir='po', install_path="${LOCALEDIR}") The relevant task generator arguments are: :param podir: directory of the .po files :type podir: string :param appname: name of the application :type appname: string :param install_path: installation directory :type install_path: string The file LINGUAS must be present in the directory pointed by *podir* and list the translation files to process. 
""" try: self.meths.remove('process_source') except ValueError: pass self.ensure_localedir() appname = getattr(self, 'appname', getattr(Context.g_module, Context.APPNAME, 'set_your_app_name')) podir = getattr(self, 'podir', '.') inst = getattr(self, 'install_path', '${LOCALEDIR}') linguas = self.path.find_node(os.path.join(podir, 'LINGUAS')) if linguas: # scan LINGUAS file for locales to process with open(linguas.abspath()) as f: langs = [] for line in f.readlines(): # ignore lines containing comments if not line.startswith('#'): langs += line.split() re_linguas = re.compile('[-a-zA-Z_@.]+') for lang in langs: # Make sure that we only process lines which contain locales if re_linguas.match(lang): node = self.path.find_resource(os.path.join(podir, re_linguas.match(lang).group() + '.po')) task = self.create_task('po', node, node.change_ext('.mo')) if inst: filename = task.outputs[0].name (langname, ext) = os.path.splitext(filename) inst_file = inst + os.sep + langname + os.sep + 'LC_MESSAGES' + os.sep + appname + '.mo' self.add_install_as(install_to=inst_file, install_from=task.outputs[0], chmod=getattr(self, 'chmod', Utils.O644)) else: Logs.pprint('RED', "Error no LINGUAS file found in po directory") class po(Task.Task): """ Compiles .po files into .gmo files """ run_str = '${MSGFMT} -o ${TGT} ${SRC}' color = 'BLUE' class intltool(Task.Task): """ Calls intltool-merge to update translation files """ run_str = '${INTLTOOL} ${INTLFLAGS} ${INTLCACHE_ST:INTLCACHE} ${INTLPODIR} ${SRC} ${TGT}' color = 'BLUE' @conf def find_msgfmt(conf): """ Detects msgfmt and sets the ``MSGFMT`` variable """ conf.find_program('msgfmt', var='MSGFMT') @conf def find_intltool_merge(conf): """ Detects intltool-merge """ if not conf.env.PERL: conf.find_program('perl', var='PERL') conf.env.INTLCACHE_ST = '--cache=%s' conf.env.INTLFLAGS_DEFAULT = ['-q', '-u'] conf.find_program('intltool-merge', interpreter='PERL', var='INTLTOOL') def configure(conf): """ Detects the program *msgfmt* and set *conf.env.MSGFMT*. Detects the program *intltool-merge* and set *conf.env.INTLTOOL*. It is possible to set INTLTOOL in the environment, but it must not have spaces in it:: $ INTLTOOL="/path/to/the program/intltool" waf configure If a C/C++ compiler is present, execute a compilation test to find the header *locale.h*. """ conf.find_msgfmt() conf.find_intltool_merge() if conf.env.CC or conf.env.CXX: conf.check(header_name='locale.h') tdb-1.4.2/third_party/waf/waflib/Tools/irixcc.py0000660000000000000000000000250613444661622021543 0ustar rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # imported from samba """ Compiler definition for irix/MIPSpro cc compiler """ from waflib import Errors from waflib.Tools import ccroot, ar from waflib.Configure import conf @conf def find_irixcc(conf): v = conf.env cc = None if v.CC: cc = v.CC elif 'CC' in conf.environ: cc = conf.environ['CC'] if not cc: cc = conf.find_program('cc', var='CC') if not cc: conf.fatal('irixcc was not found') try: conf.cmd_and_log(cc + ['-version']) except Errors.WafError: conf.fatal('%r -version could not be executed' % cc) v.CC = cc v.CC_NAME = 'irix' @conf def irixcc_common_flags(conf): v = conf.env v.CC_SRC_F = '' v.CC_TGT_F = ['-c', '-o'] v.CPPPATH_ST = '-I%s' v.DEFINES_ST = '-D%s' if not v.LINK_CC: v.LINK_CC = v.CC v.CCLNK_SRC_F = '' v.CCLNK_TGT_F = ['-o'] v.LIB_ST = '-l%s' # template for adding libs v.LIBPATH_ST = '-L%s' # template for adding libpaths v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-L%s' v.cprogram_PATTERN = '%s' v.cshlib_PATTERN = 'lib%s.so' v.cstlib_PATTERN = 'lib%s.a' def configure(conf): conf.find_irixcc() conf.find_cpp() conf.find_ar() conf.irixcc_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() tdb-1.4.2/third_party/waf/waflib/Tools/javaw.py0000660000000000000000000004077213527011455021374 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) """ Java support Javac is one of the few compilers that behaves very badly: #. it outputs files where it wants to (-d is only for the package root) #. it recompiles files silently behind your back #. it outputs an undefined amount of files (inner classes) Remember that the compilation can be performed using Jython[1] rather than regular Python. Instead of running one of the following commands:: ./waf configure python waf configure You would have to run:: java -jar /path/to/jython.jar waf configure [1] http://www.jython.org/ Usage ===== Load the "java" tool. def configure(conf): conf.load('java') Java tools will be autodetected and eventually, if present, the quite standard JAVA_HOME environment variable will be used. The also standard CLASSPATH variable is used for library searching. In configuration phase checks can be done on the system environment, for example to check if a class is known in the classpath:: conf.check_java_class('java.io.FileOutputStream') or if the system supports JNI applications building:: conf.check_jni_headers() The java tool supports compiling java code, creating jar files and creating javadoc documentation. This can be either done separately or together in a single definition. For example to manage them separately:: bld(features = 'javac', srcdir = 'src', compat = '1.7', use = 'animals', name = 'cats-src', ) bld(features = 'jar', basedir = '.', destfile = '../cats.jar', name = 'cats', use = 'cats-src' ) Or together by defining all the needed attributes:: bld(features = 'javac jar javadoc', srcdir = 'src/', # folder containing the sources to compile outdir = 'src', # folder where to output the classes (in the build directory) compat = '1.6', # java compatibility version number classpath = ['.', '..'], # jar basedir = 'src', # folder containing the classes and other files to package (must match outdir) destfile = 'foo.jar', # do not put the destfile in the folder of the java classes! 
use = 'NNN', jaropts = ['-C', 'default/src/', '.'], # can be used to give files manifest = 'src/Manifest.mf', # Manifest file to include # javadoc javadoc_package = ['com.meow' , 'com.meow.truc.bar', 'com.meow.truc.foo'], javadoc_output = 'javadoc', ) External jar dependencies can be mapped to a standard waf "use" dependency by setting an environment variable with a CLASSPATH prefix in the configuration, for example:: conf.env.CLASSPATH_NNN = ['aaaa.jar', 'bbbb.jar'] and then NNN can be freely used in rules as:: use = 'NNN', In the java tool the dependencies via use are not transitive by default, as this necessity depends on the code. To enable recursive dependency scanning use on a specific rule: recurse_use = True Or build-wise by setting RECURSE_JAVA: bld.env.RECURSE_JAVA = True Unit tests can be integrated in the waf unit test environment using the javatest extra. """ import os, shutil from waflib import Task, Utils, Errors, Node from waflib.Configure import conf from waflib.TaskGen import feature, before_method, after_method, taskgen_method from waflib.Tools import ccroot ccroot.USELIB_VARS['javac'] = set(['CLASSPATH', 'JAVACFLAGS']) SOURCE_RE = '**/*.java' JAR_RE = '**/*' class_check_source = ''' public class Test { public static void main(String[] argv) { Class lib; if (argv.length < 1) { System.err.println("Missing argument"); System.exit(77); } try { lib = Class.forName(argv[0]); } catch (ClassNotFoundException e) { System.err.println("ClassNotFoundException"); System.exit(1); } lib = null; System.exit(0); } } ''' @feature('javac') @before_method('process_source') def apply_java(self): """ Create a javac task for compiling *.java files*. There can be only one javac task by task generator. """ Utils.def_attrs(self, jarname='', classpath='', sourcepath='.', srcdir='.', jar_mf_attributes={}, jar_mf_classpath=[]) outdir = getattr(self, 'outdir', None) if outdir: if not isinstance(outdir, Node.Node): outdir = self.path.get_bld().make_node(self.outdir) else: outdir = self.path.get_bld() outdir.mkdir() self.outdir = outdir self.env.OUTDIR = outdir.abspath() self.javac_task = tsk = self.create_task('javac') tmp = [] srcdir = getattr(self, 'srcdir', '') if isinstance(srcdir, Node.Node): srcdir = [srcdir] for x in Utils.to_list(srcdir): if isinstance(x, Node.Node): y = x else: y = self.path.find_dir(x) if not y: self.bld.fatal('Could not find the folder %s from %s' % (x, self.path)) tmp.append(y) tsk.srcdir = tmp if getattr(self, 'compat', None): tsk.env.append_value('JAVACFLAGS', ['-source', str(self.compat)]) if hasattr(self, 'sourcepath'): fold = [isinstance(x, Node.Node) and x or self.path.find_dir(x) for x in self.to_list(self.sourcepath)] names = os.pathsep.join([x.srcpath() for x in fold]) else: names = [x.srcpath() for x in tsk.srcdir] if names: tsk.env.append_value('JAVACFLAGS', ['-sourcepath', names]) @taskgen_method def java_use_rec(self, name, **kw): """ Processes recursively the *use* attribute for each referred java compilation """ if name in self.tmp_use_seen: return self.tmp_use_seen.append(name) try: y = self.bld.get_tgen_by_name(name) except Errors.WafError: self.uselib.append(name) return else: y.post() # Add generated JAR name for CLASSPATH. 
Task ordering (set_run_after) # is already guaranteed by ordering done between the single tasks if hasattr(y, 'jar_task'): self.use_lst.append(y.jar_task.outputs[0].abspath()) else: if hasattr(y,'outdir'): self.use_lst.append(y.outdir.abspath()) else: self.use_lst.append(y.path.get_bld().abspath()) for x in self.to_list(getattr(y, 'use', [])): self.java_use_rec(x) @feature('javac') @before_method('propagate_uselib_vars') @after_method('apply_java') def use_javac_files(self): """ Processes the *use* attribute referring to other java compilations """ self.use_lst = [] self.tmp_use_seen = [] self.uselib = self.to_list(getattr(self, 'uselib', [])) names = self.to_list(getattr(self, 'use', [])) get = self.bld.get_tgen_by_name for x in names: try: tg = get(x) except Errors.WafError: self.uselib.append(x) else: tg.post() if hasattr(tg, 'jar_task'): self.use_lst.append(tg.jar_task.outputs[0].abspath()) self.javac_task.set_run_after(tg.jar_task) self.javac_task.dep_nodes.extend(tg.jar_task.outputs) else: if hasattr(tg, 'outdir'): base_node = tg.outdir.abspath() else: base_node = tg.path.get_bld() self.use_lst.append(base_node.abspath()) self.javac_task.dep_nodes.extend([x for x in base_node.ant_glob(JAR_RE, remove=False, quiet=True)]) for tsk in tg.tasks: self.javac_task.set_run_after(tsk) # If recurse use scan is enabled recursively add use attribute for each used one if getattr(self, 'recurse_use', False) or self.bld.env.RECURSE_JAVA: self.java_use_rec(x) self.env.append_value('CLASSPATH', self.use_lst) @feature('javac') @after_method('apply_java', 'propagate_uselib_vars', 'use_javac_files') def set_classpath(self): """ Sets the CLASSPATH value on the *javac* task previously created. """ if getattr(self, 'classpath', None): self.env.append_unique('CLASSPATH', getattr(self, 'classpath', [])) for x in self.tasks: x.env.CLASSPATH = os.pathsep.join(self.env.CLASSPATH) + os.pathsep @feature('jar') @after_method('apply_java', 'use_javac_files') @before_method('process_source') def jar_files(self): """ Creates a jar task (one maximum per task generator) """ destfile = getattr(self, 'destfile', 'test.jar') jaropts = getattr(self, 'jaropts', []) manifest = getattr(self, 'manifest', None) basedir = getattr(self, 'basedir', None) if basedir: if not isinstance(self.basedir, Node.Node): basedir = self.path.get_bld().make_node(basedir) else: basedir = self.path.get_bld() if not basedir: self.bld.fatal('Could not find the basedir %r for %r' % (self.basedir, self)) self.jar_task = tsk = self.create_task('jar_create') if manifest: jarcreate = getattr(self, 'jarcreate', 'cfm') if not isinstance(manifest,Node.Node): node = self.path.find_resource(manifest) else: node = manifest if not node: self.bld.fatal('invalid manifest file %r for %r' % (manifest, self)) tsk.dep_nodes.append(node) jaropts.insert(0, node.abspath()) else: jarcreate = getattr(self, 'jarcreate', 'cf') if not isinstance(destfile, Node.Node): destfile = self.path.find_or_declare(destfile) if not destfile: self.bld.fatal('invalid destfile %r for %r' % (destfile, self)) tsk.set_outputs(destfile) tsk.basedir = basedir jaropts.append('-C') jaropts.append(basedir.bldpath()) jaropts.append('.') tsk.env.JAROPTS = jaropts tsk.env.JARCREATE = jarcreate if getattr(self, 'javac_task', None): tsk.set_run_after(self.javac_task) @feature('jar') @after_method('jar_files') def use_jar_files(self): """ Processes the *use* attribute to set the build order on the tasks created by another task generator. 
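	For example (hedged; the generator names are illustrative)::

		bld(features='javac', srcdir='src', name='app-src')
		bld(features='jar', basedir='.', destfile='app.jar', use='app-src')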
""" self.uselib = self.to_list(getattr(self, 'uselib', [])) names = self.to_list(getattr(self, 'use', [])) get = self.bld.get_tgen_by_name for x in names: try: y = get(x) except Errors.WafError: self.uselib.append(x) else: y.post() self.jar_task.run_after.update(y.tasks) class JTask(Task.Task): """ Base class for java and jar tasks; provides functionality to run long commands """ def split_argfile(self, cmd): inline = [cmd[0]] infile = [] for x in cmd[1:]: # jar and javac do not want -J flags in @file if x.startswith('-J'): inline.append(x) else: infile.append(self.quote_flag(x)) return (inline, infile) class jar_create(JTask): """ Creates a jar file """ color = 'GREEN' run_str = '${JAR} ${JARCREATE} ${TGT} ${JAROPTS}' def runnable_status(self): """ Wait for dependent tasks to be executed, then read the files to update the list of inputs. """ for t in self.run_after: if not t.hasrun: return Task.ASK_LATER if not self.inputs: try: self.inputs = [x for x in self.basedir.ant_glob(JAR_RE, remove=False, quiet=True) if id(x) != id(self.outputs[0])] except Exception: raise Errors.WafError('Could not find the basedir %r for %r' % (self.basedir, self)) return super(jar_create, self).runnable_status() class javac(JTask): """ Compiles java files """ color = 'BLUE' run_str = '${JAVAC} -classpath ${CLASSPATH} -d ${OUTDIR} ${JAVACFLAGS} ${SRC}' vars = ['CLASSPATH', 'JAVACFLAGS', 'JAVAC', 'OUTDIR'] """ The javac task will be executed again if the variables CLASSPATH, JAVACFLAGS, JAVAC or OUTDIR change. """ def uid(self): """Identify java tasks by input&output folder""" lst = [self.__class__.__name__, self.generator.outdir.abspath()] for x in self.srcdir: lst.append(x.abspath()) return Utils.h_list(lst) def runnable_status(self): """ Waits for dependent tasks to be complete, then read the file system to find the input nodes. 
""" for t in self.run_after: if not t.hasrun: return Task.ASK_LATER if not self.inputs: self.inputs = [] for x in self.srcdir: if x.exists(): self.inputs.extend(x.ant_glob(SOURCE_RE, remove=False, quiet=True)) return super(javac, self).runnable_status() def post_run(self): """ List class files created """ for node in self.generator.outdir.ant_glob('**/*.class', quiet=True): self.generator.bld.node_sigs[node] = self.uid() self.generator.bld.task_sigs[self.uid()] = self.cache_sig @feature('javadoc') @after_method('process_rule') def create_javadoc(self): """ Creates a javadoc task (feature 'javadoc') """ tsk = self.create_task('javadoc') tsk.classpath = getattr(self, 'classpath', []) self.javadoc_package = Utils.to_list(self.javadoc_package) if not isinstance(self.javadoc_output, Node.Node): self.javadoc_output = self.bld.path.find_or_declare(self.javadoc_output) class javadoc(Task.Task): """ Builds java documentation """ color = 'BLUE' def __str__(self): return '%s: %s -> %s\n' % (self.__class__.__name__, self.generator.srcdir, self.generator.javadoc_output) def run(self): env = self.env bld = self.generator.bld wd = bld.bldnode #add src node + bld node (for generated java code) srcpath = self.generator.path.abspath() + os.sep + self.generator.srcdir srcpath += os.pathsep srcpath += self.generator.path.get_bld().abspath() + os.sep + self.generator.srcdir classpath = env.CLASSPATH classpath += os.pathsep classpath += os.pathsep.join(self.classpath) classpath = "".join(classpath) self.last_cmd = lst = [] lst.extend(Utils.to_list(env.JAVADOC)) lst.extend(['-d', self.generator.javadoc_output.abspath()]) lst.extend(['-sourcepath', srcpath]) lst.extend(['-classpath', classpath]) lst.extend(['-subpackages']) lst.extend(self.generator.javadoc_package) lst = [x for x in lst if x] self.generator.bld.cmd_and_log(lst, cwd=wd, env=env.env or None, quiet=0) def post_run(self): nodes = self.generator.javadoc_output.ant_glob('**', quiet=True) for node in nodes: self.generator.bld.node_sigs[node] = self.uid() self.generator.bld.task_sigs[self.uid()] = self.cache_sig def configure(self): """ Detects the javac, java and jar programs """ # If JAVA_PATH is set, we prepend it to the path list java_path = self.environ['PATH'].split(os.pathsep) v = self.env if 'JAVA_HOME' in self.environ: java_path = [os.path.join(self.environ['JAVA_HOME'], 'bin')] + java_path self.env.JAVA_HOME = [self.environ['JAVA_HOME']] for x in 'javac java jar javadoc'.split(): self.find_program(x, var=x.upper(), path_list=java_path, mandatory=(x not in ('javadoc'))) if 'CLASSPATH' in self.environ: v.CLASSPATH = self.environ['CLASSPATH'] if not v.JAR: self.fatal('jar is required for making java packages') if not v.JAVAC: self.fatal('javac is required for compiling java classes') v.JARCREATE = 'cf' # can use cvf v.JAVACFLAGS = [] @conf def check_java_class(self, classname, with_classpath=None): """ Checks if the specified java class exists :param classname: class to check, like java.util.HashMap :type classname: string :param with_classpath: additional classpath to give :type with_classpath: string """ javatestdir = '.waf-javatest' classpath = javatestdir if self.env.CLASSPATH: classpath += os.pathsep + self.env.CLASSPATH if isinstance(with_classpath, str): classpath += os.pathsep + with_classpath shutil.rmtree(javatestdir, True) os.mkdir(javatestdir) Utils.writef(os.path.join(javatestdir, 'Test.java'), class_check_source) # Compile the source self.exec_command(self.env.JAVAC + [os.path.join(javatestdir, 'Test.java')], shell=False) # Try to 
run the app cmd = self.env.JAVA + ['-cp', classpath, 'Test', classname] self.to_log("%s\n" % str(cmd)) found = self.exec_command(cmd, shell=False) self.msg('Checking for java class %s' % classname, not found) shutil.rmtree(javatestdir, True) return found @conf def check_jni_headers(conf): """ Checks for jni headers and libraries. On success the conf.env variables xxx_JAVA are added for use in C/C++ targets:: def options(opt): opt.load('compiler_c') def configure(conf): conf.load('compiler_c java') conf.check_jni_headers() def build(bld): bld.shlib(source='a.c', target='app', use='JAVA') """ if not conf.env.CC_NAME and not conf.env.CXX_NAME: conf.fatal('load a compiler first (gcc, g++, ..)') if not conf.env.JAVA_HOME: conf.fatal('set JAVA_HOME in the system environment') # jni requires the jvm javaHome = conf.env.JAVA_HOME[0] dir = conf.root.find_dir(conf.env.JAVA_HOME[0] + '/include') if dir is None: dir = conf.root.find_dir(conf.env.JAVA_HOME[0] + '/../Headers') # think different?! if dir is None: conf.fatal('JAVA_HOME does not seem to be set properly') f = dir.ant_glob('**/(jni|jni_md).h') incDirs = [x.parent.abspath() for x in f] dir = conf.root.find_dir(conf.env.JAVA_HOME[0]) f = dir.ant_glob('**/*jvm.(so|dll|dylib)') libDirs = [x.parent.abspath() for x in f] or [javaHome] # On windows, we need both the .dll and .lib to link. On my JDK, they are # in different directories... f = dir.ant_glob('**/*jvm.(lib)') if f: libDirs = [[x, y.parent.abspath()] for x in libDirs for y in f] if conf.env.DEST_OS == 'freebsd': conf.env.append_unique('LINKFLAGS_JAVA', '-pthread') for d in libDirs: try: conf.check(header_name='jni.h', define_name='HAVE_JNI_H', lib='jvm', libpath=d, includes=incDirs, uselib_store='JAVA', uselib='JAVA') except Exception: pass else: break else: conf.fatal('could not find lib jvm in %r (see config.log)' % libDirs) tdb-1.4.2/third_party/waf/waflib/Tools/ldc2.py0000660000000000000000000000224113444661622021102 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Alex Rønne Petersen, 2012 (alexrp/Zor) from waflib.Tools import ar, d from waflib.Configure import conf @conf def find_ldc2(conf): """ Finds the program *ldc2* and set the variable *D* """ conf.find_program(['ldc2'], var='D') out = conf.cmd_and_log(conf.env.D + ['-version']) if out.find("based on DMD v2.") == -1: conf.fatal("detected compiler is not ldc2") @conf def common_flags_ldc2(conf): """ Sets the D flags required by *ldc2* """ v = conf.env v.D_SRC_F = ['-c'] v.D_TGT_F = '-of%s' v.D_LINKER = v.D v.DLNK_SRC_F = '' v.DLNK_TGT_F = '-of%s' v.DINC_ST = '-I%s' v.DSHLIB_MARKER = v.DSTLIB_MARKER = '' v.DSTLIB_ST = v.DSHLIB_ST = '-L-l%s' v.DSTLIBPATH_ST = v.DLIBPATH_ST = '-L-L%s' v.LINKFLAGS_dshlib = ['-L-shared'] v.DHEADER_ext = '.di' v.DFLAGS_d_with_header = ['-H', '-Hf'] v.D_HDR_F = '%s' v.LINKFLAGS = [] v.DFLAGS_dshlib = ['-relocation-model=pic'] def configure(conf): """ Configuration for *ldc2* """ conf.find_ldc2() conf.load('ar') conf.load('d') conf.common_flags_ldc2() conf.d_platform_flags() tdb-1.4.2/third_party/waf/waflib/Tools/lua.py0000660000000000000000000000152313444661622021041 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Sebastian Schlingmann, 2008 # Thomas Nagy, 2008-2018 (ita) """ Lua support. 
Compile *.lua* files into *.luac*:: def configure(conf): conf.load('lua') conf.env.LUADIR = '/usr/local/share/myapp/scripts/' def build(bld): bld(source='foo.lua') """ from waflib.TaskGen import extension from waflib import Task @extension('.lua') def add_lua(self, node): tsk = self.create_task('luac', node, node.change_ext('.luac')) inst_to = getattr(self, 'install_path', self.env.LUADIR and '${LUADIR}' or None) if inst_to: self.add_install_files(install_to=inst_to, install_from=tsk.outputs) return tsk class luac(Task.Task): run_str = '${LUAC} -s -o ${TGT} ${SRC}' color = 'PINK' def configure(conf): """ Detect the luac compiler and set *conf.env.LUAC* """ conf.find_program('luac', var='LUAC') tdb-1.4.2/third_party/waf/waflib/Tools/md5_tstamp.py0000660000000000000000000000170313527011455022330 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 """ Re-calculate md5 hashes of files only when the file time has changed:: def options(opt): opt.load('md5_tstamp') The hashes can also reflect either the file contents (STRONGEST=True) or the file time and file size. The performance benefits of this module are usually insignificant. """ import os, stat from waflib import Utils, Build, Node STRONGEST = True Build.SAVED_ATTRS.append('hashes_md5_tstamp') def h_file(self): filename = self.abspath() st = os.stat(filename) cache = self.ctx.hashes_md5_tstamp if filename in cache and cache[filename][0] == st.st_mtime: return cache[filename][1] if STRONGEST: ret = Utils.h_file(filename) else: if stat.S_ISDIR(st[stat.ST_MODE]): raise IOError('Not a file') ret = Utils.md5(str((st.st_mtime, st.st_size)).encode()).digest() cache[filename] = (st.st_mtime, ret) return ret h_file.__doc__ = Node.Node.h_file.__doc__ Node.Node.h_file = h_file tdb-1.4.2/third_party/waf/waflib/Tools/msvc.py0000660000000000000000000010420213527011455021221 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Carlos Rafael Giani, 2006 (dv) # Tamas Pal, 2007 (folti) # Nicolas Mercier, 2009 # Matt Clarkson, 2012 """ Microsoft Visual C++/Intel C++ compiler support If you get detection problems, first try any of the following:: chcp 65001 set PYTHONIOENCODING=... set PYTHONLEGACYWINDOWSSTDIO=1 Usage:: $ waf configure --msvc_version="msvc 10.0,msvc 9.0" --msvc_targets="x64" or:: def configure(conf): conf.env.MSVC_VERSIONS = ['msvc 10.0', 'msvc 9.0', 'msvc 8.0', 'msvc 7.1', 'msvc 7.0', 'msvc 6.0', 'wsdk 7.0', 'intel 11', 'PocketPC 9.0', 'Smartphone 8.0'] conf.env.MSVC_TARGETS = ['x64'] conf.load('msvc') or:: def configure(conf): conf.load('msvc', funs='no_autodetect') conf.check_lib_msvc('gdi32') conf.check_libs_msvc('kernel32 user32') def build(bld): tg = bld.program(source='main.c', target='app', use='KERNEL32 USER32 GDI32') Platforms and targets will be tested in the order they appear; the first good configuration will be used. To force testing all the configurations that are not used, use the ``--no-msvc-lazy`` option or set ``conf.env.MSVC_LAZY_AUTODETECT=False``.
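For example, a configuration that disables lazy detection entirely (a sketch; the version and target values are illustrative)::

	def configure(conf):
		conf.env.MSVC_LAZY_AUTODETECT = False
		conf.env.MSVC_VERSIONS = ['msvc 14.0']
		conf.env.MSVC_TARGETS = ['x64']
		conf.load('msvc')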
Supported platforms: ia64, x64, x86, x86_amd64, x86_ia64, x86_arm, amd64_x86, amd64_arm Compilers supported: * msvc => Visual Studio, versions 6.0 (VC 98, VC .NET 2002) to 15 (Visual Studio 2017) * wsdk => Windows SDK, versions 6.0, 6.1, 7.0, 7.1, 8.0 * icl => Intel compiler, versions 9, 10, 11, 13 * winphone => Visual Studio to target Windows Phone 8 native (version 8.0 for now) * Smartphone => Compiler/SDK for Smartphone devices (armv4/v4i) * PocketPC => Compiler/SDK for PocketPC devices (armv4/v4i) To use WAF in a VS2008 Make file project (see http://code.google.com/p/waf/issues/detail?id=894) You may consider to set the environment variable "VS_UNICODE_OUTPUT" to nothing before calling waf. So in your project settings use something like 'cmd.exe /C "set VS_UNICODE_OUTPUT=& set PYTHONUNBUFFERED=true & waf build"'. cmd.exe /C "chcp 1252 & set PYTHONUNBUFFERED=true && set && waf configure" Setting PYTHONUNBUFFERED gives the unbuffered output. """ import os, sys, re, traceback from waflib import Utils, Logs, Options, Errors from waflib.TaskGen import after_method, feature from waflib.Configure import conf from waflib.Tools import ccroot, c, cxx, ar g_msvc_systemlibs = ''' aclui activeds ad1 adptif adsiid advapi32 asycfilt authz bhsupp bits bufferoverflowu cabinet cap certadm certidl ciuuid clusapi comctl32 comdlg32 comsupp comsuppd comsuppw comsuppwd comsvcs credui crypt32 cryptnet cryptui d3d8thk daouuid dbgeng dbghelp dciman32 ddao35 ddao35d ddao35u ddao35ud delayimp dhcpcsvc dhcpsapi dlcapi dnsapi dsprop dsuiext dtchelp faultrep fcachdll fci fdi framedyd framedyn gdi32 gdiplus glauxglu32 gpedit gpmuuid gtrts32w gtrtst32hlink htmlhelp httpapi icm32 icmui imagehlp imm32 iphlpapi iprop kernel32 ksguid ksproxy ksuser libcmt libcmtd libcpmt libcpmtd loadperf lz32 mapi mapi32 mgmtapi minidump mmc mobsync mpr mprapi mqoa mqrt msacm32 mscms mscoree msdasc msimg32 msrating mstask msvcmrt msvcurt msvcurtd mswsock msxml2 mtx mtxdm netapi32 nmapinmsupp npptools ntdsapi ntdsbcli ntmsapi ntquery odbc32 odbcbcp odbccp32 oldnames ole32 oleacc oleaut32 oledb oledlgolepro32 opends60 opengl32 osptk parser pdh penter pgobootrun pgort powrprof psapi ptrustm ptrustmd ptrustu ptrustud qosname rasapi32 rasdlg rassapi resutils riched20 rpcndr rpcns4 rpcrt4 rtm rtutils runtmchk scarddlg scrnsave scrnsavw secur32 sensapi setupapi sfc shell32 shfolder shlwapi sisbkup snmpapi sporder srclient sti strsafe svcguid tapi32 thunk32 traffic unicows url urlmon user32 userenv usp10 uuid uxtheme vcomp vcompd vdmdbg version vfw32 wbemuuid webpost wiaguid wininet winmm winscard winspool winstrm wintrust wldap32 wmiutils wow32 ws2_32 wsnmp32 wsock32 wst wtsapi32 xaswitch xolehlp '''.split() """importlibs provided by MSVC/Platform SDK. 
Do NOT search them""" all_msvc_platforms = [ ('x64', 'amd64'), ('x86', 'x86'), ('ia64', 'ia64'), ('x86_amd64', 'amd64'), ('x86_ia64', 'ia64'), ('x86_arm', 'arm'), ('x86_arm64', 'arm64'), ('amd64_x86', 'x86'), ('amd64_arm', 'arm'), ('amd64_arm64', 'arm64') ] """List of msvc platforms""" all_wince_platforms = [ ('armv4', 'arm'), ('armv4i', 'arm'), ('mipsii', 'mips'), ('mipsii_fp', 'mips'), ('mipsiv', 'mips'), ('mipsiv_fp', 'mips'), ('sh4', 'sh'), ('x86', 'cex86') ] """List of wince platforms""" all_icl_platforms = [ ('intel64', 'amd64'), ('em64t', 'amd64'), ('ia32', 'x86'), ('Itanium', 'ia64')] """List of icl platforms""" def options(opt): opt.add_option('--msvc_version', type='string', help = 'msvc version, eg: "msvc 10.0,msvc 9.0"', default='') opt.add_option('--msvc_targets', type='string', help = 'msvc targets, eg: "x64,arm"', default='') opt.add_option('--no-msvc-lazy', action='store_false', help = 'lazily check msvc target environments', default=True, dest='msvc_lazy') @conf def setup_msvc(conf, versiondict): """ Checks installed compilers and targets and returns the first combination from the user's options, env, or the global supported lists that checks. :param versiondict: dict(platform -> dict(architecture -> configuration)) :type versiondict: dict(string -> dict(string -> target_compiler) :return: the compiler, revision, path, include dirs, library paths and target architecture :rtype: tuple of strings """ platforms = getattr(Options.options, 'msvc_targets', '').split(',') if platforms == ['']: platforms=Utils.to_list(conf.env.MSVC_TARGETS) or [i for i,j in all_msvc_platforms+all_icl_platforms+all_wince_platforms] desired_versions = getattr(Options.options, 'msvc_version', '').split(',') if desired_versions == ['']: desired_versions = conf.env.MSVC_VERSIONS or list(reversed(sorted(versiondict.keys()))) # Override lazy detection by evaluating after the fact. lazy_detect = getattr(Options.options, 'msvc_lazy', True) if conf.env.MSVC_LAZY_AUTODETECT is False: lazy_detect = False if not lazy_detect: for val in versiondict.values(): for arch in list(val.keys()): cfg = val[arch] cfg.evaluate() if not cfg.is_valid: del val[arch] conf.env.MSVC_INSTALLED_VERSIONS = versiondict for version in desired_versions: Logs.debug('msvc: detecting %r - %r', version, desired_versions) try: targets = versiondict[version] except KeyError: continue seen = set() for arch in platforms: if arch in seen: continue else: seen.add(arch) try: cfg = targets[arch] except KeyError: continue cfg.evaluate() if cfg.is_valid: compiler,revision = version.rsplit(' ', 1) return compiler,revision,cfg.bindirs,cfg.incdirs,cfg.libdirs,cfg.cpu conf.fatal('msvc: Impossible to find a valid architecture for building %r - %r' % (desired_versions, list(versiondict.keys()))) @conf def get_msvc_version(conf, compiler, version, target, vcvars): """ Checks that an installed compiler actually runs and uses vcvars to obtain the environment needed by the compiler. 
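The check works by writing a temporary batch file that calls the *vcvars* script and then echoes %PATH%, %INCLUDE% and %LIB%; that output is parsed to recover the binary, include and library directories.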
:param compiler: compiler type, for looking up the executable name :param version: compiler version, for debugging only :param target: target architecture :param vcvars: batch file to run to check the environment :return: the location of the compiler executable, the location of include dirs, and the library paths :rtype: tuple of strings """ Logs.debug('msvc: get_msvc_version: %r %r %r', compiler, version, target) try: conf.msvc_cnt += 1 except AttributeError: conf.msvc_cnt = 1 batfile = conf.bldnode.make_node('waf-print-msvc-%d.bat' % conf.msvc_cnt) batfile.write("""@echo off set INCLUDE= set LIB= call "%s" %s echo PATH=%%PATH%% echo INCLUDE=%%INCLUDE%% echo LIB=%%LIB%%;%%LIBPATH%% """ % (vcvars,target)) sout = conf.cmd_and_log(['cmd.exe', '/E:on', '/V:on', '/C', batfile.abspath()]) lines = sout.splitlines() if not lines[0]: lines.pop(0) MSVC_PATH = MSVC_INCDIR = MSVC_LIBDIR = None for line in lines: if line.startswith('PATH='): path = line[5:] MSVC_PATH = path.split(';') elif line.startswith('INCLUDE='): MSVC_INCDIR = [i for i in line[8:].split(';') if i] elif line.startswith('LIB='): MSVC_LIBDIR = [i for i in line[4:].split(';') if i] if None in (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR): conf.fatal('msvc: Could not find a valid architecture for building (get_msvc_version_3)') # Check if the compiler is usable at all. # The detection may return 64-bit versions even on 32-bit systems, and these would fail to run. env = dict(os.environ) env.update(PATH = path) compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler) cxx = conf.find_program(compiler_name, path_list=MSVC_PATH) # delete CL if exists. because it could contain parameters which can change cl's behaviour rather catastrophically. if 'CL' in env: del(env['CL']) try: conf.cmd_and_log(cxx + ['/help'], env=env) except UnicodeError: st = traceback.format_exc() if conf.logger: conf.logger.error(st) conf.fatal('msvc: Unicode error - check the code page?') except Exception as e: Logs.debug('msvc: get_msvc_version: %r %r %r -> failure %s', compiler, version, target, str(e)) conf.fatal('msvc: cannot run the compiler in get_msvc_version (run with -v to display errors)') else: Logs.debug('msvc: get_msvc_version: %r %r %r -> OK', compiler, version, target) finally: conf.env[compiler_name] = '' return (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR) def gather_wince_supported_platforms(): """ Checks SmartPhones SDKs :param versions: list to modify :type versions: list """ supported_wince_platforms = [] try: ce_sdk = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Windows CE Tools\\SDKs') except OSError: try: ce_sdk = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Windows CE Tools\\SDKs') except OSError: ce_sdk = '' if not ce_sdk: return supported_wince_platforms index = 0 while 1: try: sdk_device = Utils.winreg.EnumKey(ce_sdk, index) sdk = Utils.winreg.OpenKey(ce_sdk, sdk_device) except OSError: break index += 1 try: path,type = Utils.winreg.QueryValueEx(sdk, 'SDKRootDir') except OSError: try: path,type = Utils.winreg.QueryValueEx(sdk,'SDKInformation') except OSError: continue path,xml = os.path.split(path) path = str(path) path,device = os.path.split(path) if not device: path,device = os.path.split(path) platforms = [] for arch,compiler in all_wince_platforms: if os.path.isdir(os.path.join(path, device, 'Lib', arch)): platforms.append((arch, compiler, os.path.join(path, device, 'Include', arch), os.path.join(path, device, 'Lib', arch))) if platforms: 
supported_wince_platforms.append((device, platforms)) return supported_wince_platforms def gather_msvc_detected_versions(): #Detected MSVC versions! version_pattern = re.compile(r'^(\d\d?\.\d\d?)(Exp)?$') detected_versions = [] for vcver,vcvar in (('VCExpress','Exp'), ('VisualStudio','')): prefix = 'SOFTWARE\\Wow6432node\\Microsoft\\' + vcver try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, prefix) except OSError: prefix = 'SOFTWARE\\Microsoft\\' + vcver try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, prefix) except OSError: continue index = 0 while 1: try: version = Utils.winreg.EnumKey(all_versions, index) except OSError: break index += 1 match = version_pattern.match(version) if match: versionnumber = float(match.group(1)) else: continue detected_versions.append((versionnumber, version+vcvar, prefix+'\\'+version)) def fun(tup): return tup[0] detected_versions.sort(key = fun) return detected_versions class target_compiler(object): """ Wrap a compiler configuration; call evaluate() to determine whether the configuration is usable. """ def __init__(self, ctx, compiler, cpu, version, bat_target, bat, callback=None): """ :param ctx: configuration context to use to eventually get the version environment :param compiler: compiler name :param cpu: target cpu :param version: compiler version number :param bat_target: ? :param bat: path to the batch file to run """ self.conf = ctx self.name = None self.is_valid = False self.is_done = False self.compiler = compiler self.cpu = cpu self.version = version self.bat_target = bat_target self.bat = bat self.callback = callback def evaluate(self): if self.is_done: return self.is_done = True try: vs = self.conf.get_msvc_version(self.compiler, self.version, self.bat_target, self.bat) except Errors.ConfigurationError: self.is_valid = False return if self.callback: vs = self.callback(self, vs) self.is_valid = True (self.bindirs, self.incdirs, self.libdirs) = vs def __str__(self): return str((self.compiler, self.cpu, self.version, self.bat_target, self.bat)) def __repr__(self): return repr((self.compiler, self.cpu, self.version, self.bat_target, self.bat)) @conf def gather_wsdk_versions(conf, versions): """ Use winreg to add the msvc versions to the input list :param versions: list to modify :type versions: list """ version_pattern = re.compile(r'^v..?.?\...?.?') try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Microsoft SDKs\\Windows') except OSError: try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows') except OSError: return index = 0 while 1: try: version = Utils.winreg.EnumKey(all_versions, index) except OSError: break index += 1 if not version_pattern.match(version): continue try: msvc_version = Utils.winreg.OpenKey(all_versions, version) path,type = Utils.winreg.QueryValueEx(msvc_version,'InstallationFolder') except OSError: continue if path and os.path.isfile(os.path.join(path, 'bin', 'SetEnv.cmd')): targets = {} for target,arch in all_msvc_platforms: targets[target] = target_compiler(conf, 'wsdk', arch, version, '/'+target, os.path.join(path, 'bin', 'SetEnv.cmd')) versions['wsdk ' + version[1:]] = targets @conf def gather_msvc_targets(conf, versions, version, vc_path): #Looking for normal MSVC compilers! 
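# The location of the environment script depends on the product generation:
# VS 2017 and later ship VC/Auxiliary/Build/vcvarsall.bat, earlier releases
# keep vcvarsall.bat at the VC root, and the oldest ones only provide
# vsvars32.bat / vcvars32.bat (x86 only); hence the probing order below.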
targets = {} if os.path.isfile(os.path.join(vc_path, 'VC', 'Auxiliary', 'Build', 'vcvarsall.bat')): for target,realtarget in all_msvc_platforms[::-1]: targets[target] = target_compiler(conf, 'msvc', realtarget, version, target, os.path.join(vc_path, 'VC', 'Auxiliary', 'Build', 'vcvarsall.bat')) elif os.path.isfile(os.path.join(vc_path, 'vcvarsall.bat')): for target,realtarget in all_msvc_platforms[::-1]: targets[target] = target_compiler(conf, 'msvc', realtarget, version, target, os.path.join(vc_path, 'vcvarsall.bat')) elif os.path.isfile(os.path.join(vc_path, 'Common7', 'Tools', 'vsvars32.bat')): targets['x86'] = target_compiler(conf, 'msvc', 'x86', version, 'x86', os.path.join(vc_path, 'Common7', 'Tools', 'vsvars32.bat')) elif os.path.isfile(os.path.join(vc_path, 'Bin', 'vcvars32.bat')): targets['x86'] = target_compiler(conf, 'msvc', 'x86', version, '', os.path.join(vc_path, 'Bin', 'vcvars32.bat')) if targets: versions['msvc %s' % version] = targets @conf def gather_wince_targets(conf, versions, version, vc_path, vsvars, supported_platforms): #Looking for Win CE compilers! for device,platforms in supported_platforms: targets = {} for platform,compiler,include,lib in platforms: winCEpath = os.path.join(vc_path, 'ce') if not os.path.isdir(winCEpath): continue if os.path.isdir(os.path.join(winCEpath, 'lib', platform)): bindirs = [os.path.join(winCEpath, 'bin', compiler), os.path.join(winCEpath, 'bin', 'x86_'+compiler)] incdirs = [os.path.join(winCEpath, 'include'), os.path.join(winCEpath, 'atlmfc', 'include'), include] libdirs = [os.path.join(winCEpath, 'lib', platform), os.path.join(winCEpath, 'atlmfc', 'lib', platform), lib] def combine_common(obj, compiler_env): # TODO this is likely broken, remove in waf 2.1 (common_bindirs,_1,_2) = compiler_env return (bindirs + common_bindirs, incdirs, libdirs) targets[platform] = target_compiler(conf, 'msvc', platform, version, 'x86', vsvars, combine_common) if targets: versions[device + ' ' + version] = targets @conf def gather_winphone_targets(conf, versions, version, vc_path, vsvars): #Looking for WinPhone compilers targets = {} for target,realtarget in all_msvc_platforms[::-1]: targets[target] = target_compiler(conf, 'winphone', realtarget, version, target, vsvars) if targets: versions['winphone ' + version] = targets @conf def gather_vswhere_versions(conf, versions): try: import json except ImportError: Logs.error('Visual Studio 2017 detection requires Python 2.6') return prg_path = os.environ.get('ProgramFiles(x86)', os.environ.get('ProgramFiles', 'C:\\Program Files (x86)')) vswhere = os.path.join(prg_path, 'Microsoft Visual Studio', 'Installer', 'vswhere.exe') args = [vswhere, '-products', '*', '-legacy', '-format', 'json'] try: txt = conf.cmd_and_log(args) except Errors.WafError as e: Logs.debug('msvc: vswhere.exe failed %s', e) return if sys.version_info[0] < 3: txt = txt.decode(Utils.console_encoding()) arr = json.loads(txt) arr.sort(key=lambda x: x['installationVersion']) for entry in arr: ver = entry['installationVersion'] ver = str('.'.join(ver.split('.')[:2])) path = str(os.path.abspath(entry['installationPath'])) if os.path.exists(path) and ('msvc %s' % ver) not in versions: conf.gather_msvc_targets(versions, ver, path) @conf def gather_msvc_versions(conf, versions): vc_paths = [] for (v,version,reg) in gather_msvc_detected_versions(): try: try: msvc_version = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, reg + "\\Setup\\VC") except OSError: msvc_version = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, reg + 
"\\Setup\\Microsoft Visual C++") path,type = Utils.winreg.QueryValueEx(msvc_version, 'ProductDir') except OSError: try: msvc_version = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, "SOFTWARE\\Wow6432node\\Microsoft\\VisualStudio\\SxS\\VS7") path,type = Utils.winreg.QueryValueEx(msvc_version, version) except OSError: continue else: vc_paths.append((version, os.path.abspath(str(path)))) continue else: vc_paths.append((version, os.path.abspath(str(path)))) wince_supported_platforms = gather_wince_supported_platforms() for version,vc_path in vc_paths: vs_path = os.path.dirname(vc_path) vsvars = os.path.join(vs_path, 'Common7', 'Tools', 'vsvars32.bat') if wince_supported_platforms and os.path.isfile(vsvars): conf.gather_wince_targets(versions, version, vc_path, vsvars, wince_supported_platforms) # WP80 works with 11.0Exp and 11.0, both of which resolve to the same vc_path. # Stop after one is found. for version,vc_path in vc_paths: vs_path = os.path.dirname(vc_path) vsvars = os.path.join(vs_path, 'VC', 'WPSDK', 'WP80', 'vcvarsphoneall.bat') if os.path.isfile(vsvars): conf.gather_winphone_targets(versions, '8.0', vc_path, vsvars) break for version,vc_path in vc_paths: vs_path = os.path.dirname(vc_path) conf.gather_msvc_targets(versions, version, vc_path) @conf def gather_icl_versions(conf, versions): """ Checks ICL compilers :param versions: list to modify :type versions: list """ version_pattern = re.compile(r'^...?.?\....?.?') try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Intel\\Compilers\\C++') except OSError: try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Intel\\Compilers\\C++') except OSError: return index = 0 while 1: try: version = Utils.winreg.EnumKey(all_versions, index) except OSError: break index += 1 if not version_pattern.match(version): continue targets = {} for target,arch in all_icl_platforms: if target=='intel64': targetDir='EM64T_NATIVE' else: targetDir=target try: Utils.winreg.OpenKey(all_versions,version+'\\'+targetDir) icl_version=Utils.winreg.OpenKey(all_versions,version) path,type=Utils.winreg.QueryValueEx(icl_version,'ProductDir') except OSError: pass else: batch_file=os.path.join(path,'bin','iclvars.bat') if os.path.isfile(batch_file): targets[target] = target_compiler(conf, 'intel', arch, version, target, batch_file) for target,arch in all_icl_platforms: try: icl_version = Utils.winreg.OpenKey(all_versions, version+'\\'+target) path,type = Utils.winreg.QueryValueEx(icl_version,'ProductDir') except OSError: continue else: batch_file=os.path.join(path,'bin','iclvars.bat') if os.path.isfile(batch_file): targets[target] = target_compiler(conf, 'intel', arch, version, target, batch_file) major = version[0:2] versions['intel ' + major] = targets @conf def gather_intel_composer_versions(conf, versions): """ Checks ICL compilers that are part of Intel Composer Suites :param versions: list to modify :type versions: list """ version_pattern = re.compile(r'^...?.?\...?.?.?') try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Intel\\Suites') except OSError: try: all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Intel\\Suites') except OSError: return index = 0 while 1: try: version = Utils.winreg.EnumKey(all_versions, index) except OSError: break index += 1 if not version_pattern.match(version): continue targets = {} for target,arch in all_icl_platforms: if target=='intel64': targetDir='EM64T_NATIVE' else: 
targetDir=target try: try: defaults = Utils.winreg.OpenKey(all_versions,version+'\\Defaults\\C++\\'+targetDir) except OSError: if targetDir == 'EM64T_NATIVE': defaults = Utils.winreg.OpenKey(all_versions,version+'\\Defaults\\C++\\EM64T') else: raise uid,type = Utils.winreg.QueryValueEx(defaults, 'SubKey') Utils.winreg.OpenKey(all_versions,version+'\\'+uid+'\\C++\\'+targetDir) icl_version=Utils.winreg.OpenKey(all_versions,version+'\\'+uid+'\\C++') path,type=Utils.winreg.QueryValueEx(icl_version,'ProductDir') except OSError: pass else: batch_file=os.path.join(path,'bin','iclvars.bat') if os.path.isfile(batch_file): targets[target] = target_compiler(conf, 'intel', arch, version, target, batch_file) # The intel compilervar_arch.bat is broken when used with Visual Studio Express 2012 # http://software.intel.com/en-us/forums/topic/328487 compilervars_warning_attr = '_compilervars_warning_key' if version[0:2] == '13' and getattr(conf, compilervars_warning_attr, True): setattr(conf, compilervars_warning_attr, False) patch_url = 'http://software.intel.com/en-us/forums/topic/328487' compilervars_arch = os.path.join(path, 'bin', 'compilervars_arch.bat') for vscomntool in ('VS110COMNTOOLS', 'VS100COMNTOOLS'): if vscomntool in os.environ: vs_express_path = os.environ[vscomntool] + r'..\IDE\VSWinExpress.exe' dev_env_path = os.environ[vscomntool] + r'..\IDE\devenv.exe' if (r'if exist "%VS110COMNTOOLS%..\IDE\VSWinExpress.exe"' in Utils.readf(compilervars_arch) and not os.path.exists(vs_express_path) and not os.path.exists(dev_env_path)): Logs.warn(('The Intel compilervar_arch.bat only checks for one Visual Studio SKU ' '(VSWinExpress.exe) but it does not seem to be installed at %r. ' 'The intel command line set up will fail to configure unless the file %r' 'is patched. See: %s') % (vs_express_path, compilervars_arch, patch_url)) major = version[0:2] versions['intel ' + major] = targets @conf def detect_msvc(self): return self.setup_msvc(self.get_msvc_versions()) @conf def get_msvc_versions(self): """ :return: platform to compiler configurations :rtype: dict """ dct = Utils.ordered_iter_dict() self.gather_icl_versions(dct) self.gather_intel_composer_versions(dct) self.gather_wsdk_versions(dct) self.gather_msvc_versions(dct) self.gather_vswhere_versions(dct) Logs.debug('msvc: detected versions %r', list(dct.keys())) return dct @conf def find_lt_names_msvc(self, libname, is_static=False): """ Win32/MSVC specific code to glean out information from libtool la files. this function is not attached to the task_gen class. 
Returns a triplet: (library absolute path, library name without extension, whether the library is static) """ lt_names=[ 'lib%s.la' % libname, '%s.la' % libname, ] for path in self.env.LIBPATH: for la in lt_names: laf=os.path.join(path,la) dll=None if os.path.exists(laf): ltdict = Utils.read_la_file(laf) lt_libdir=None if ltdict.get('libdir', ''): lt_libdir = ltdict['libdir'] if not is_static and ltdict.get('library_names', ''): dllnames=ltdict['library_names'].split() dll=dllnames[0].lower() dll=re.sub(r'\.dll$', '', dll) return (lt_libdir, dll, False) elif ltdict.get('old_library', ''): olib=ltdict['old_library'] if os.path.exists(os.path.join(path,olib)): return (path, olib, True) elif lt_libdir != '' and os.path.exists(os.path.join(lt_libdir,olib)): return (lt_libdir, olib, True) else: return (None, olib, True) else: raise self.errors.WafError('invalid libtool object file: %s' % laf) return (None, None, None) @conf def libname_msvc(self, libname, is_static=False): lib = libname.lower() lib = re.sub(r'\.lib$','',lib) if lib in g_msvc_systemlibs: return lib lib=re.sub('^lib','',lib) if lib == 'm': return None (lt_path, lt_libname, lt_static) = self.find_lt_names_msvc(lib, is_static) if lt_path != None and lt_libname != None: if lt_static: # file existence check has been made by find_lt_names return os.path.join(lt_path,lt_libname) if lt_path != None: _libpaths = [lt_path] + self.env.LIBPATH else: _libpaths = self.env.LIBPATH static_libs=[ 'lib%ss.lib' % lib, 'lib%s.lib' % lib, '%ss.lib' % lib, '%s.lib' %lib, ] dynamic_libs=[ 'lib%s.dll.lib' % lib, 'lib%s.dll.a' % lib, '%s.dll.lib' % lib, '%s.dll.a' % lib, 'lib%s_d.lib' % lib, '%s_d.lib' % lib, '%s.lib' %lib, ] libnames=static_libs if not is_static: libnames=dynamic_libs + static_libs for path in _libpaths: for libn in libnames: if os.path.exists(os.path.join(path, libn)): Logs.debug('msvc: lib found: %s', os.path.join(path,libn)) return re.sub(r'\.lib$', '',libn) #if no lib can be found, just return the libname as msvc expects it self.fatal('The library %r could not be found' % libname) return re.sub(r'\.lib$', '', libname) @conf def check_lib_msvc(self, libname, is_static=False, uselib_store=None): """ Ideally we should be able to place the lib in the right env var, either STLIB or LIB, but we don't distinguish static libs from shared libs. 
This is ok since msvc doesn't have any special linker flag to select static libs (no env.STLIB_MARKER) """ libn = self.libname_msvc(libname, is_static) if not uselib_store: uselib_store = libname.upper() if False and is_static: # disabled self.env['STLIB_' + uselib_store] = [libn] else: self.env['LIB_' + uselib_store] = [libn] @conf def check_libs_msvc(self, libnames, is_static=False): for libname in Utils.to_list(libnames): self.check_lib_msvc(libname, is_static) def configure(conf): """ Configuration methods to call for detecting msvc """ conf.autodetect(True) conf.find_msvc() conf.msvc_common_flags() conf.cc_load_tools() conf.cxx_load_tools() conf.cc_add_flags() conf.cxx_add_flags() conf.link_add_flags() conf.visual_studio_add_flags() @conf def no_autodetect(conf): conf.env.NO_MSVC_DETECT = 1 configure(conf) @conf def autodetect(conf, arch=False): v = conf.env if v.NO_MSVC_DETECT: return compiler, version, path, includes, libdirs, cpu = conf.detect_msvc() if arch: v.DEST_CPU = cpu v.PATH = path v.INCLUDES = includes v.LIBPATH = libdirs v.MSVC_COMPILER = compiler try: v.MSVC_VERSION = float(version) except ValueError: v.MSVC_VERSION = float(version[:-3]) def _get_prog_names(conf, compiler): if compiler == 'intel': compiler_name = 'ICL' linker_name = 'XILINK' lib_name = 'XILIB' else: # assumes CL.exe compiler_name = 'CL' linker_name = 'LINK' lib_name = 'LIB' return compiler_name, linker_name, lib_name @conf def find_msvc(conf): """Due to path format limitations, limit operation only to native Win32. Yeah it sucks.""" if sys.platform == 'cygwin': conf.fatal('MSVC module does not work under cygwin Python!') # the autodetection is supposed to be performed before entering in this method v = conf.env path = v.PATH compiler = v.MSVC_COMPILER version = v.MSVC_VERSION compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler) v.MSVC_MANIFEST = (compiler == 'msvc' and version >= 8) or (compiler == 'wsdk' and version >= 6) or (compiler == 'intel' and version >= 11) # compiler cxx = conf.find_program(compiler_name, var='CXX', path_list=path) # before setting anything, check if the compiler is really msvc env = dict(conf.environ) if path: env.update(PATH = ';'.join(path)) if not conf.cmd_and_log(cxx + ['/nologo', '/help'], env=env): conf.fatal('the msvc compiler could not be identified') # c/c++ compiler v.CC = v.CXX = cxx v.CC_NAME = v.CXX_NAME = 'msvc' # linker if not v.LINK_CXX: conf.find_program(linker_name, path_list=path, errmsg='%s was not found (linker)' % linker_name, var='LINK_CXX') if not v.LINK_CC: v.LINK_CC = v.LINK_CXX # staticlib linker if not v.AR: stliblink = conf.find_program(lib_name, path_list=path, var='AR') if not stliblink: return v.ARFLAGS = ['/nologo'] # manifest tool. Not required for VS 2003 and below. Must have for VS 2005 and later if v.MSVC_MANIFEST: conf.find_program('MT', path_list=path, var='MT') v.MTFLAGS = ['/nologo'] try: conf.load('winres') except Errors.ConfigurationError: Logs.warn('Resource compiler not found. 
Compiling resource file is disabled') @conf def visual_studio_add_flags(self): """visual studio flags found in the system environment""" v = self.env if self.environ.get('INCLUDE'): v.prepend_value('INCLUDES', [x for x in self.environ['INCLUDE'].split(';') if x]) # notice the 'S' if self.environ.get('LIB'): v.prepend_value('LIBPATH', [x for x in self.environ['LIB'].split(';') if x]) @conf def msvc_common_flags(conf): """ Setup the flags required for executing the msvc compiler """ v = conf.env v.DEST_BINFMT = 'pe' v.append_value('CFLAGS', ['/nologo']) v.append_value('CXXFLAGS', ['/nologo']) v.append_value('LINKFLAGS', ['/nologo']) v.DEFINES_ST = '/D%s' v.CC_SRC_F = '' v.CC_TGT_F = ['/c', '/Fo'] v.CXX_SRC_F = '' v.CXX_TGT_F = ['/c', '/Fo'] if (v.MSVC_COMPILER == 'msvc' and v.MSVC_VERSION >= 8) or (v.MSVC_COMPILER == 'wsdk' and v.MSVC_VERSION >= 6): v.CC_TGT_F = ['/FC'] + v.CC_TGT_F v.CXX_TGT_F = ['/FC'] + v.CXX_TGT_F v.CPPPATH_ST = '/I%s' # template for adding include paths v.AR_TGT_F = v.CCLNK_TGT_F = v.CXXLNK_TGT_F = '/OUT:' # CRT specific flags v.CFLAGS_CRT_MULTITHREADED = v.CXXFLAGS_CRT_MULTITHREADED = ['/MT'] v.CFLAGS_CRT_MULTITHREADED_DLL = v.CXXFLAGS_CRT_MULTITHREADED_DLL = ['/MD'] v.CFLAGS_CRT_MULTITHREADED_DBG = v.CXXFLAGS_CRT_MULTITHREADED_DBG = ['/MTd'] v.CFLAGS_CRT_MULTITHREADED_DLL_DBG = v.CXXFLAGS_CRT_MULTITHREADED_DLL_DBG = ['/MDd'] v.LIB_ST = '%s.lib' v.LIBPATH_ST = '/LIBPATH:%s' v.STLIB_ST = '%s.lib' v.STLIBPATH_ST = '/LIBPATH:%s' if v.MSVC_MANIFEST: v.append_value('LINKFLAGS', ['/MANIFEST']) v.CFLAGS_cshlib = [] v.CXXFLAGS_cxxshlib = [] v.LINKFLAGS_cshlib = v.LINKFLAGS_cxxshlib = ['/DLL'] v.cshlib_PATTERN = v.cxxshlib_PATTERN = '%s.dll' v.implib_PATTERN = '%s.lib' v.IMPLIB_ST = '/IMPLIB:%s' v.LINKFLAGS_cstlib = [] v.cstlib_PATTERN = v.cxxstlib_PATTERN = '%s.lib' v.cprogram_PATTERN = v.cxxprogram_PATTERN = '%s.exe' v.def_PATTERN = '/def:%s' ####################################################################################################### ##### conf above, build below @after_method('apply_link') @feature('c', 'cxx') def apply_flags_msvc(self): """ Add additional flags implied by msvc, such as subsystems and pdb files:: def build(bld): bld.stlib(source='main.c', target='bar', subsystem='gruik') """ if self.env.CC_NAME != 'msvc' or not getattr(self, 'link_task', None): return is_static = isinstance(self.link_task, ccroot.stlink_task) subsystem = getattr(self, 'subsystem', '') if subsystem: subsystem = '/subsystem:%s' % subsystem flags = is_static and 'ARFLAGS' or 'LINKFLAGS' self.env.append_value(flags, subsystem) if not is_static: for f in self.env.LINKFLAGS: d = f.lower() if d[1:] in ('debug', 'debug:full', 'debug:fastlink'): pdbnode = self.link_task.outputs[0].change_ext('.pdb') self.link_task.outputs.append(pdbnode) if getattr(self, 'install_task', None): self.pdb_install_task = self.add_install_files( install_to=self.install_task.install_to, install_from=pdbnode) break @feature('cprogram', 'cshlib', 'cxxprogram', 'cxxshlib') @after_method('apply_link') def apply_manifest(self): """ Special linker for MSVC with support for embedding manifests into DLL's and executables compiled by Visual Studio 2005 or probably later. Without the manifest file, the binaries are unusable. 
See: http://msdn2.microsoft.com/en-us/library/ms235542(VS.80).aspx """ if self.env.CC_NAME == 'msvc' and self.env.MSVC_MANIFEST and getattr(self, 'link_task', None): out_node = self.link_task.outputs[0] man_node = out_node.parent.find_or_declare(out_node.name + '.manifest') self.link_task.outputs.append(man_node) self.env.DO_MANIFEST = True def make_winapp(self, family): append = self.env.append_unique append('DEFINES', 'WINAPI_FAMILY=%s' % family) append('CXXFLAGS', ['/ZW', '/TP']) for lib_path in self.env.LIBPATH: append('CXXFLAGS','/AI%s'%lib_path) @feature('winphoneapp') @after_method('process_use') @after_method('propagate_uselib_vars') def make_winphone_app(self): """ Insert configuration flags for windows phone applications (adds /ZW, /TP...) """ make_winapp(self, 'WINAPI_FAMILY_PHONE_APP') self.env.append_unique('LINKFLAGS', ['/NODEFAULTLIB:ole32.lib', 'PhoneAppModelHost.lib']) @feature('winapp') @after_method('process_use') @after_method('propagate_uselib_vars') def make_windows_app(self): """ Insert configuration flags for windows applications (adds /ZW, /TP...) """ make_winapp(self, 'WINAPI_FAMILY_DESKTOP_APP') tdb-1.4.2/third_party/waf/waflib/Tools/nasm.py0000660000000000000000000000111413444661622021212 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2008-2018 (ita) """ Nasm tool (asm processing) """ import os import waflib.Tools.asm # leave this from waflib.TaskGen import feature @feature('asm') def apply_nasm_vars(self): """provided for compatibility""" self.env.append_value('ASFLAGS', self.to_list(getattr(self, 'nasm_flags', []))) def configure(conf): """ Detect nasm/yasm and set the variable *AS* """ conf.find_program(['nasm', 'yasm'], var='AS') conf.env.AS_TGT_F = ['-o'] conf.env.ASLNK_TGT_F = ['-o'] conf.load('asm') conf.env.ASMPATH_ST = '-I%s' + os.sep tdb-1.4.2/third_party/waf/waflib/Tools/nobuild.py0000660000000000000000000000064313444661622021716 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2015 (ita) """ Override the build commands to write empty files. This is useful for profiling and evaluating the Python overhead. To use:: def build(bld): ... bld.load('nobuild') """ from waflib import Task def build(bld): def run(self): for x in self.outputs: x.write('') for (name, cls) in Task.classes.items(): cls.run = run tdb-1.4.2/third_party/waf/waflib/Tools/perl.py0000660000000000000000000001064513444661622021227 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # andersg at 0x63.nu 2007 # Thomas Nagy 2016-2018 (ita) """ Support for Perl extensions. A C/C++ compiler is required:: def options(opt): opt.load('compiler_c perl') def configure(conf): conf.load('compiler_c perl') conf.check_perl_version((5,6,0)) conf.check_perl_ext_devel() conf.check_perl_module('Cairo') conf.check_perl_module('Devel::PPPort 4.89') def build(bld): bld( features = 'c cshlib perlext', source = 'Mytest.xs', target = 'Mytest', install_path = '${ARCHDIR_PERL}/auto') bld.install_files('${ARCHDIR_PERL}', 'Mytest.pm') """ import os from waflib import Task, Options, Utils, Errors from waflib.Configure import conf from waflib.TaskGen import extension, feature, before_method @before_method('apply_incpaths', 'apply_link', 'propagate_uselib_vars') @feature('perlext') def init_perlext(self): """ Change the values of *cshlib_PATTERN* and *cxxshlib_PATTERN* to remove the *lib* prefix from library names. 
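For instance, with a typical ``perlext_PATTERN`` of ``%s.so`` (derived from ``$Config{dlext}`` by :py:func:`waflib.Tools.perl.check_perl_ext_devel`), a target named *Mytest* yields *Mytest.so* rather than *libMytest.so*.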
""" self.uselib = self.to_list(getattr(self, 'uselib', [])) if not 'PERLEXT' in self.uselib: self.uselib.append('PERLEXT') self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.perlext_PATTERN @extension('.xs') def xsubpp_file(self, node): """ Create :py:class:`waflib.Tools.perl.xsubpp` tasks to process *.xs* files """ outnode = node.change_ext('.c') self.create_task('xsubpp', node, outnode) self.source.append(outnode) class xsubpp(Task.Task): """ Process *.xs* files """ run_str = '${PERL} ${XSUBPP} -noprototypes -typemap ${EXTUTILS_TYPEMAP} ${SRC} > ${TGT}' color = 'BLUE' ext_out = ['.h'] @conf def check_perl_version(self, minver=None): """ Check if Perl is installed, and set the variable PERL. minver is supposed to be a tuple """ res = True if minver: cver = '.'.join(map(str,minver)) else: cver = '' self.start_msg('Checking for minimum perl version %s' % cver) perl = self.find_program('perl', var='PERL', value=getattr(Options.options, 'perlbinary', None)) version = self.cmd_and_log(perl + ["-e", 'printf \"%vd\", $^V']) if not version: res = False version = "Unknown" elif not minver is None: ver = tuple(map(int, version.split("."))) if ver < minver: res = False self.end_msg(version, color=res and 'GREEN' or 'YELLOW') return res @conf def check_perl_module(self, module): """ Check if specified perlmodule is installed. The minimum version can be specified by specifying it after modulename like this:: def configure(conf): conf.check_perl_module("Some::Module 2.92") """ cmd = self.env.PERL + ['-e', 'use %s' % module] self.start_msg('perl module %s' % module) try: r = self.cmd_and_log(cmd) except Errors.WafError: self.end_msg(False) return None self.end_msg(r or True) return r @conf def check_perl_ext_devel(self): """ Check for configuration needed to build perl extensions. Sets different xxx_PERLEXT variables in the environment. Also sets the ARCHDIR_PERL variable useful as installation path, which can be overridden by ``--with-perl-archdir`` option. """ env = self.env perl = env.PERL if not perl: self.fatal('find perl first') def cmd_perl_config(s): return perl + ['-MConfig', '-e', 'print \"%s\"' % s] def cfg_str(cfg): return self.cmd_and_log(cmd_perl_config(cfg)) def cfg_lst(cfg): return Utils.to_list(cfg_str(cfg)) def find_xsubpp(): for var in ('privlib', 'vendorlib'): xsubpp = cfg_lst('$Config{%s}/ExtUtils/xsubpp$Config{exe_ext}' % var) if xsubpp and os.path.isfile(xsubpp[0]): return xsubpp return self.find_program('xsubpp') env.LINKFLAGS_PERLEXT = cfg_lst('$Config{lddlflags}') env.INCLUDES_PERLEXT = cfg_lst('$Config{archlib}/CORE') env.CFLAGS_PERLEXT = cfg_lst('$Config{ccflags} $Config{cccdlflags}') env.EXTUTILS_TYPEMAP = cfg_lst('$Config{privlib}/ExtUtils/typemap') env.XSUBPP = find_xsubpp() if not getattr(Options.options, 'perlarchdir', None): env.ARCHDIR_PERL = cfg_str('$Config{sitearch}') else: env.ARCHDIR_PERL = getattr(Options.options, 'perlarchdir') env.perlext_PATTERN = '%s.' + cfg_str('$Config{dlext}') def options(opt): """ Add the ``--with-perl-archdir`` and ``--with-perl-binary`` command-line options. 
""" opt.add_option('--with-perl-binary', type='string', dest='perlbinary', help = 'Specify alternate perl binary', default=None) opt.add_option('--with-perl-archdir', type='string', dest='perlarchdir', help = 'Specify directory where to install arch specific files', default=None) tdb-1.4.2/third_party/waf/waflib/Tools/python.py0000660000000000000000000005262013527011455021600 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2007-2015 (ita) # Gustavo Carneiro (gjc), 2007 """ Support for Python, detect the headers and libraries and provide *use* variables to link C/C++ programs against them:: def options(opt): opt.load('compiler_c python') def configure(conf): conf.load('compiler_c python') conf.check_python_version((2,4,2)) conf.check_python_headers() def build(bld): bld.program(features='pyembed', source='a.c', target='myprog') bld.shlib(features='pyext', source='b.c', target='mylib') """ import os, sys from waflib import Errors, Logs, Node, Options, Task, Utils from waflib.TaskGen import extension, before_method, after_method, feature from waflib.Configure import conf FRAG = ''' #include #ifdef __cplusplus extern "C" { #endif void Py_Initialize(void); void Py_Finalize(void); #ifdef __cplusplus } #endif int main(int argc, char **argv) { (void)argc; (void)argv; Py_Initialize(); Py_Finalize(); return 0; } ''' """ Piece of C/C++ code used in :py:func:`waflib.Tools.python.check_python_headers` """ INST = ''' import sys, py_compile py_compile.compile(sys.argv[1], sys.argv[2], sys.argv[3], True) ''' """ Piece of Python code used in :py:class:`waflib.Tools.python.pyo` and :py:class:`waflib.Tools.python.pyc` for byte-compiling python files """ DISTUTILS_IMP = ['from distutils.sysconfig import get_config_var, get_python_lib'] @before_method('process_source') @feature('py') def feature_py(self): """ Create tasks to byte-compile .py files and install them, if requested """ self.install_path = getattr(self, 'install_path', '${PYTHONDIR}') install_from = getattr(self, 'install_from', None) if install_from and not isinstance(install_from, Node.Node): install_from = self.path.find_dir(install_from) self.install_from = install_from ver = self.env.PYTHON_VERSION if not ver: self.bld.fatal('Installing python files requires PYTHON_VERSION, try conf.check_python_version') if int(ver.replace('.', '')) > 31: self.install_32 = True @extension('.py') def process_py(self, node): """ Add signature of .py file, so it will be byte-compiled when necessary """ assert(hasattr(self, 'install_path')), 'add features="py"' # where to install the python file if self.install_path: if self.install_from: self.add_install_files(install_to=self.install_path, install_from=node, cwd=self.install_from, relative_trick=True) else: self.add_install_files(install_to=self.install_path, install_from=node, relative_trick=True) lst = [] if self.env.PYC: lst.append('pyc') if self.env.PYO: lst.append('pyo') if self.install_path: if self.install_from: pyd = Utils.subst_vars("%s/%s" % (self.install_path, node.path_from(self.install_from)), self.env) else: pyd = Utils.subst_vars("%s/%s" % (self.install_path, node.path_from(self.path)), self.env) else: pyd = node.abspath() for ext in lst: if self.env.PYTAG and not self.env.NOPYCACHE: # __pycache__ installation for python 3.2 - PEP 3147 name = node.name[:-3] pyobj = node.parent.get_bld().make_node('__pycache__').make_node("%s.%s.%s" % (name, self.env.PYTAG, ext)) pyobj.parent.mkdir() else: pyobj = node.change_ext(".%s" % ext) tsk = self.create_task(ext, node, 
pyobj) tsk.pyd = pyd if self.install_path: self.add_install_files(install_to=os.path.dirname(pyd), install_from=pyobj, cwd=node.parent.get_bld(), relative_trick=True) class pyc(Task.Task): """ Byte-compiling python files """ color = 'PINK' def __str__(self): node = self.outputs[0] return node.path_from(node.ctx.launch_node()) def run(self): cmd = [Utils.subst_vars('${PYTHON}', self.env), '-c', INST, self.inputs[0].abspath(), self.outputs[0].abspath(), self.pyd] ret = self.generator.bld.exec_command(cmd) return ret class pyo(Task.Task): """ Byte-compiling python files """ color = 'PINK' def __str__(self): node = self.outputs[0] return node.path_from(node.ctx.launch_node()) def run(self): cmd = [Utils.subst_vars('${PYTHON}', self.env), Utils.subst_vars('${PYFLAGS_OPT}', self.env), '-c', INST, self.inputs[0].abspath(), self.outputs[0].abspath(), self.pyd] ret = self.generator.bld.exec_command(cmd) return ret @feature('pyext') @before_method('propagate_uselib_vars', 'apply_link') @after_method('apply_bundle') def init_pyext(self): """ Change the values of *cshlib_PATTERN* and *cxxshlib_PATTERN* to remove the *lib* prefix from library names. """ self.uselib = self.to_list(getattr(self, 'uselib', [])) if not 'PYEXT' in self.uselib: self.uselib.append('PYEXT') # override shlib_PATTERN set by the osx module self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.macbundle_PATTERN = self.env.pyext_PATTERN self.env.fcshlib_PATTERN = self.env.dshlib_PATTERN = self.env.pyext_PATTERN try: if not self.install_path: return except AttributeError: self.install_path = '${PYTHONARCHDIR}' @feature('pyext') @before_method('apply_link', 'apply_bundle') def set_bundle(self): """Mac-specific pyext extension that enables bundles from c_osx.py""" if Utils.unversioned_sys_platform() == 'darwin': self.mac_bundle = True @before_method('propagate_uselib_vars') @feature('pyembed') def init_pyembed(self): """ Add the PYEMBED variable. 
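A minimal task generator embedding the interpreter, mirroring the module-level example, would be::

	bld.program(features='pyembed', source='a.c', target='myprog')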
""" self.uselib = self.to_list(getattr(self, 'uselib', [])) if not 'PYEMBED' in self.uselib: self.uselib.append('PYEMBED') @conf def get_python_variables(self, variables, imports=None): """ Spawn a new python process to dump configuration variables :param variables: variables to print :type variables: list of string :param imports: one import by element :type imports: list of string :return: the variable values :rtype: list of string """ if not imports: try: imports = self.python_imports except AttributeError: imports = DISTUTILS_IMP program = list(imports) # copy program.append('') for v in variables: program.append("print(repr(%s))" % v) os_env = dict(os.environ) try: del os_env['MACOSX_DEPLOYMENT_TARGET'] # see comments in the OSX tool except KeyError: pass try: out = self.cmd_and_log(self.env.PYTHON + ['-c', '\n'.join(program)], env=os_env) except Errors.WafError: self.fatal('The distutils module is unusable: install "python-devel"?') self.to_log(out) return_values = [] for s in out.splitlines(): s = s.strip() if not s: continue if s == 'None': return_values.append(None) elif (s[0] == "'" and s[-1] == "'") or (s[0] == '"' and s[-1] == '"'): return_values.append(eval(s)) elif s[0].isdigit(): return_values.append(int(s)) else: break return return_values @conf def test_pyembed(self, mode, msg='Testing pyembed configuration'): self.check(header_name='Python.h', define_name='HAVE_PYEMBED', msg=msg, fragment=FRAG, errmsg='Could not build a python embedded interpreter', features='%s %sprogram pyembed' % (mode, mode)) @conf def test_pyext(self, mode, msg='Testing pyext configuration'): self.check(header_name='Python.h', define_name='HAVE_PYEXT', msg=msg, fragment=FRAG, errmsg='Could not build python extensions', features='%s %sshlib pyext' % (mode, mode)) @conf def python_cross_compile(self, features='pyembed pyext'): """ For cross-compilation purposes, it is possible to bypass the normal detection and set the flags that you want: PYTHON_VERSION='3.4' PYTAG='cpython34' pyext_PATTERN="%s.so" PYTHON_LDFLAGS='-lpthread -ldl' waf configure The following variables are used: PYTHON_VERSION required PYTAG required PYTHON_LDFLAGS required pyext_PATTERN required PYTHON_PYEXT_LDFLAGS PYTHON_PYEMBED_LDFLAGS """ features = Utils.to_list(features) if not ('PYTHON_LDFLAGS' in self.environ or 'PYTHON_PYEXT_LDFLAGS' in self.environ or 'PYTHON_PYEMBED_LDFLAGS' in self.environ): return False for x in 'PYTHON_VERSION PYTAG pyext_PATTERN'.split(): if not x in self.environ: self.fatal('Please set %s in the os environment' % x) else: self.env[x] = self.environ[x] xx = self.env.CXX_NAME and 'cxx' or 'c' if 'pyext' in features: flags = self.environ.get('PYTHON_PYEXT_LDFLAGS', self.environ.get('PYTHON_LDFLAGS')) if flags is None: self.fatal('No flags provided through PYTHON_PYEXT_LDFLAGS as required') else: self.parse_flags(flags, 'PYEXT') self.test_pyext(xx) if 'pyembed' in features: flags = self.environ.get('PYTHON_PYEMBED_LDFLAGS', self.environ.get('PYTHON_LDFLAGS')) if flags is None: self.fatal('No flags provided through PYTHON_PYEMBED_LDFLAGS as required') else: self.parse_flags(flags, 'PYEMBED') self.test_pyembed(xx) return True @conf def check_python_headers(conf, features='pyembed pyext'): """ Check for headers and libraries necessary to extend or embed python by using the module *distutils*. 
On success the environment variables xxx_PYEXT and xxx_PYEMBED are added: * PYEXT: for compiling python extensions * PYEMBED: for embedding a python interpreter """ features = Utils.to_list(features) assert ('pyembed' in features) or ('pyext' in features), "check_python_headers features must include 'pyembed' and/or 'pyext'" env = conf.env if not env.CC_NAME and not env.CXX_NAME: conf.fatal('load a compiler first (gcc, g++, ..)') # bypass all the code below for cross-compilation if conf.python_cross_compile(features): return if not env.PYTHON_VERSION: conf.check_python_version() pybin = env.PYTHON if not pybin: conf.fatal('Could not find the python executable') # so we actually do all this for compatibility reasons and for obtaining pyext_PATTERN below v = 'prefix SO LDFLAGS LIBDIR LIBPL INCLUDEPY Py_ENABLE_SHARED MACOSX_DEPLOYMENT_TARGET LDSHARED CFLAGS LDVERSION'.split() try: lst = conf.get_python_variables(["get_config_var('%s') or ''" % x for x in v]) except RuntimeError: conf.fatal("Python development headers not found (-v for details).") vals = ['%s = %r' % (x, y) for (x, y) in zip(v, lst)] conf.to_log("Configuration returned from %r:\n%s\n" % (pybin, '\n'.join(vals))) dct = dict(zip(v, lst)) x = 'MACOSX_DEPLOYMENT_TARGET' if dct[x]: env[x] = conf.environ[x] = dct[x] env.pyext_PATTERN = '%s' + dct['SO'] # not a mistake # Try to get pythonX.Y-config num = '.'.join(env.PYTHON_VERSION.split('.')[:2]) conf.find_program([''.join(pybin) + '-config', 'python%s-config' % num, 'python-config-%s' % num, 'python%sm-config' % num], var='PYTHON_CONFIG', msg="python-config", mandatory=False) if env.PYTHON_CONFIG: # check python-config output only once if conf.env.HAVE_PYTHON_H: return # python2.6-config requires 3 runs all_flags = [['--cflags', '--libs', '--ldflags']] if sys.hexversion < 0x2070000: all_flags = [[k] for k in all_flags[0]] xx = env.CXX_NAME and 'cxx' or 'c' if 'pyembed' in features: for flags in all_flags: # Python 3.8 has different flags for pyembed, needs --embed embedflags = flags + ['--embed'] try: conf.check_cfg(msg='Asking python-config for pyembed %r flags' % ' '.join(embedflags), path=env.PYTHON_CONFIG, package='', uselib_store='PYEMBED', args=embedflags) except conf.errors.ConfigurationError: # However Python < 3.8 doesn't accept --embed, so we need a fallback conf.check_cfg(msg='Asking python-config for pyembed %r flags' % ' '.join(flags), path=env.PYTHON_CONFIG, package='', uselib_store='PYEMBED', args=flags) try: conf.test_pyembed(xx) except conf.errors.ConfigurationError: # python bug 7352 if dct['Py_ENABLE_SHARED'] and dct['LIBDIR']: env.append_unique('LIBPATH_PYEMBED', [dct['LIBDIR']]) conf.test_pyembed(xx) else: raise if 'pyext' in features: for flags in all_flags: conf.check_cfg(msg='Asking python-config for pyext %r flags' % ' '.join(flags), path=env.PYTHON_CONFIG, package='', uselib_store='PYEXT', args=flags) try: conf.test_pyext(xx) except conf.errors.ConfigurationError: # python bug 7352 if dct['Py_ENABLE_SHARED'] and dct['LIBDIR']: env.append_unique('LIBPATH_PYEXT', [dct['LIBDIR']]) conf.test_pyext(xx) else: raise conf.define('HAVE_PYTHON_H', 1) return # No python-config, do something else on windows systems all_flags = dct['LDFLAGS'] + ' ' + dct['CFLAGS'] conf.parse_flags(all_flags, 'PYEMBED') all_flags = dct['LDFLAGS'] + ' ' + dct['LDSHARED'] + ' ' + dct['CFLAGS'] conf.parse_flags(all_flags, 'PYEXT') result = None if not dct["LDVERSION"]: dct["LDVERSION"] = env.PYTHON_VERSION # further simplification will be complicated for name in ('python' + 
dct['LDVERSION'], 'python' + env.PYTHON_VERSION + 'm', 'python' + env.PYTHON_VERSION.replace('.', '')): # LIBPATH_PYEMBED is already set; see if it works. if not result and env.LIBPATH_PYEMBED: path = env.LIBPATH_PYEMBED conf.to_log("\n\n# Trying default LIBPATH_PYEMBED: %r\n" % path) result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in LIBPATH_PYEMBED' % name) if not result and dct['LIBDIR']: path = [dct['LIBDIR']] conf.to_log("\n\n# try again with -L$python_LIBDIR: %r\n" % path) result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in LIBDIR' % name) if not result and dct['LIBPL']: path = [dct['LIBPL']] conf.to_log("\n\n# try again with -L$python_LIBPL (some systems don't install the python library in $prefix/lib)\n") result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in python_LIBPL' % name) if not result: path = [os.path.join(dct['prefix'], "libs")] conf.to_log("\n\n# try again with -L$prefix/libs, and pythonXY name rather than pythonX.Y (win32)\n") result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in $prefix/libs' % name) if result: break # do not forget to set LIBPATH_PYEMBED if result: env.LIBPATH_PYEMBED = path env.append_value('LIB_PYEMBED', [name]) else: conf.to_log("\n\n### LIB NOT FOUND\n") # under certain conditions, python extensions must link to # python libraries, not just python embedding programs. if Utils.is_win32 or dct['Py_ENABLE_SHARED']: env.LIBPATH_PYEXT = env.LIBPATH_PYEMBED env.LIB_PYEXT = env.LIB_PYEMBED conf.to_log("Include path for Python extensions (found via distutils module): %r\n" % (dct['INCLUDEPY'],)) env.INCLUDES_PYEXT = [dct['INCLUDEPY']] env.INCLUDES_PYEMBED = [dct['INCLUDEPY']] # Code using the Python API needs to be compiled with -fno-strict-aliasing if env.CC_NAME == 'gcc': env.append_value('CFLAGS_PYEMBED', ['-fno-strict-aliasing']) env.append_value('CFLAGS_PYEXT', ['-fno-strict-aliasing']) if env.CXX_NAME == 'gcc': env.append_value('CXXFLAGS_PYEMBED', ['-fno-strict-aliasing']) env.append_value('CXXFLAGS_PYEXT', ['-fno-strict-aliasing']) if env.CC_NAME == "msvc": from distutils.msvccompiler import MSVCCompiler dist_compiler = MSVCCompiler() dist_compiler.initialize() env.append_value('CFLAGS_PYEXT', dist_compiler.compile_options) env.append_value('CXXFLAGS_PYEXT', dist_compiler.compile_options) env.append_value('LINKFLAGS_PYEXT', dist_compiler.ldflags_shared) # See if it compiles conf.check(header_name='Python.h', define_name='HAVE_PYTHON_H', uselib='PYEMBED', fragment=FRAG, errmsg='Distutils not installed? Broken python installation? Get python-config now!') @conf def check_python_version(conf, minver=None): """ Check if the python interpreter is found matching a given minimum version. minver should be a tuple, eg. to check for python >= 2.4.2 pass (2,4,2) as minver. If successful, PYTHON_VERSION is defined as 'MAJOR.MINOR' (eg. '2.4') of the actual python version found, and PYTHONDIR and PYTHONARCHDIR are defined, pointing to the site-packages directories appropriate for this python version, where modules/packages/extensions should be installed. 
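	For example, to require at least python 2.4.2::

		def configure(conf):
			conf.check_python_version(minver=(2, 4, 2))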
:param minver: minimum version :type minver: tuple of int """ assert minver is None or isinstance(minver, tuple) pybin = conf.env.PYTHON if not pybin: conf.fatal('could not find the python executable') # Get python version string cmd = pybin + ['-c', 'import sys\nfor x in sys.version_info: print(str(x))'] Logs.debug('python: Running python command %r', cmd) lines = conf.cmd_and_log(cmd).split() assert len(lines) == 5, "found %r lines, expected 5: %r" % (len(lines), lines) pyver_tuple = (int(lines[0]), int(lines[1]), int(lines[2]), lines[3], int(lines[4])) # Compare python version with the minimum required result = (minver is None) or (pyver_tuple >= minver) if result: # define useful environment variables pyver = '.'.join([str(x) for x in pyver_tuple[:2]]) conf.env.PYTHON_VERSION = pyver if 'PYTHONDIR' in conf.env: # Check if --pythondir was specified pydir = conf.env.PYTHONDIR elif 'PYTHONDIR' in conf.environ: # Check environment for PYTHONDIR pydir = conf.environ['PYTHONDIR'] else: # Finally, try to guess if Utils.is_win32: (python_LIBDEST, pydir) = conf.get_python_variables( ["get_config_var('LIBDEST') or ''", "get_python_lib(standard_lib=0) or ''"]) else: python_LIBDEST = None (pydir,) = conf.get_python_variables( ["get_python_lib(standard_lib=0, prefix=%r) or ''" % conf.env.PREFIX]) if python_LIBDEST is None: if conf.env.LIBDIR: python_LIBDEST = os.path.join(conf.env.LIBDIR, 'python' + pyver) else: python_LIBDEST = os.path.join(conf.env.PREFIX, 'lib', 'python' + pyver) if 'PYTHONARCHDIR' in conf.env: # Check if --pythonarchdir was specified pyarchdir = conf.env.PYTHONARCHDIR elif 'PYTHONARCHDIR' in conf.environ: # Check environment for PYTHONARCHDIR pyarchdir = conf.environ['PYTHONARCHDIR'] else: # Finally, try to guess (pyarchdir, ) = conf.get_python_variables( ["get_python_lib(plat_specific=1, standard_lib=0, prefix=%r) or ''" % conf.env.PREFIX]) if not pyarchdir: pyarchdir = pydir if hasattr(conf, 'define'): # conf.define is added by the C tool, so may not exist conf.define('PYTHONDIR', pydir) conf.define('PYTHONARCHDIR', pyarchdir) conf.env.PYTHONDIR = pydir conf.env.PYTHONARCHDIR = pyarchdir # Feedback pyver_full = '.'.join(map(str, pyver_tuple[:3])) if minver is None: conf.msg('Checking for python version', pyver_full) else: minver_str = '.'.join(map(str, minver)) conf.msg('Checking for python version >= %s' % (minver_str,), pyver_full, color=result and 'GREEN' or 'YELLOW') if not result: conf.fatal('The python version is too old, expecting %r' % (minver,)) PYTHON_MODULE_TEMPLATE = ''' import %s as current_module version = getattr(current_module, '__version__', None) if version is not None: print(str(version)) else: print('unknown version') ''' @conf def check_python_module(conf, module_name, condition=''): """ Check if the selected python interpreter can import the given python module:: def configure(conf): conf.check_python_module('pygccxml') conf.check_python_module('re', condition="ver > num(2, 0, 4) and ver <= num(3, 0, 0)") :param module_name: module :type module_name: string """ msg = "Checking for python module %r" % module_name if condition: msg = '%s (%s)' % (msg, condition) conf.start_msg(msg) try: ret = conf.cmd_and_log(conf.env.PYTHON + ['-c', PYTHON_MODULE_TEMPLATE % module_name]) except Errors.WafError: conf.end_msg(False) conf.fatal('Could not find the python module %r' % module_name) ret = ret.strip() if condition: conf.end_msg(ret) if ret == 'unknown version': conf.fatal('Could not check the %s version' % module_name) from distutils.version import LooseVersion def
num(*k): if isinstance(k[0], int): return LooseVersion('.'.join([str(x) for x in k])) else: return LooseVersion(k[0]) d = {'num': num, 'ver': LooseVersion(ret)} ev = eval(condition, {}, d) if not ev: conf.fatal('The %s version does not satisfy the requirements' % module_name) else: if ret == 'unknown version': conf.end_msg(True) else: conf.end_msg(ret) def configure(conf): """ Detect the python interpreter """ v = conf.env if getattr(Options.options, 'pythondir', None): v.PYTHONDIR = Options.options.pythondir if getattr(Options.options, 'pythonarchdir', None): v.PYTHONARCHDIR = Options.options.pythonarchdir if getattr(Options.options, 'nopycache', None): v.NOPYCACHE=Options.options.nopycache if not v.PYTHON: v.PYTHON = [getattr(Options.options, 'python', None) or sys.executable] v.PYTHON = Utils.to_list(v.PYTHON) conf.find_program('python', var='PYTHON') v.PYFLAGS = '' v.PYFLAGS_OPT = '-O' v.PYC = getattr(Options.options, 'pyc', 1) v.PYO = getattr(Options.options, 'pyo', 1) try: v.PYTAG = conf.cmd_and_log(conf.env.PYTHON + ['-c', "import imp;print(imp.get_tag())"]).strip() except Errors.WafError: pass def options(opt): """ Add python-specific options """ pyopt=opt.add_option_group("Python Options") pyopt.add_option('--nopyc', dest = 'pyc', action='store_false', default=1, help = 'Do not install bytecode compiled .pyc files (configuration) [Default:install]') pyopt.add_option('--nopyo', dest='pyo', action='store_false', default=1, help='Do not install optimised compiled .pyo files (configuration) [Default:install]') pyopt.add_option('--nopycache',dest='nopycache', action='store_true', help='Do not use __pycache__ directory to install objects [Default:auto]') pyopt.add_option('--python', dest="python", help='python binary to be used [Default: %s]' % sys.executable) pyopt.add_option('--pythondir', dest='pythondir', help='Installation path for python modules (py, platform-independent .py and .pyc files)') pyopt.add_option('--pythonarchdir', dest='pythonarchdir', help='Installation path for python extension (pyext, platform-dependent .so or .dylib files)') tdb-1.4.2/third_party/waf/waflib/Tools/qt5.py0000660000000000000000000005642313527011455020773 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) """ This tool helps with finding Qt5 tools and libraries, and also provides syntactic sugar for using Qt5 tools. The following snippet illustrates the tool usage:: def options(opt): opt.load('compiler_cxx qt5') def configure(conf): conf.load('compiler_cxx qt5') def build(bld): bld( features = 'qt5 cxx cxxprogram', uselib = 'QT5CORE QT5GUI QT5OPENGL QT5SVG', source = 'main.cpp textures.qrc aboutDialog.ui', target = 'window', ) Here, the UI description and resource files will be processed to generate code. Usage ===== Load the "qt5" tool. You also need to edit your sources accordingly: - the normal way of doing things is to have your C++ files include the .moc file. This is regarded as the best practice (and provides much faster compilations). It also implies that the include paths have been set properly.
- to have the include paths added automatically, use the following:: from waflib.TaskGen import feature, before_method, after_method @feature('cxx') @after_method('process_source') @before_method('apply_incpaths') def add_includes_paths(self): incs = set(self.to_list(getattr(self, 'includes', ''))) for x in self.compiled_tasks: incs.add(x.inputs[0].parent.path_from(self.path)) self.includes = sorted(incs) Note: another tool provides Qt processing that does not require .moc includes, see 'playground/slow_qt/'. A few options (--qt{dir,bin,...}) and environment variables (QT5_{ROOT,DIR,MOC,UIC,XCOMPILE}) allow finer tuning of the tool, tool path selection, etc; please read the source for more info. The detection uses pkg-config on Linux by default. To force static library detection use: QT5_XCOMPILE=1 QT5_FORCE_STATIC=1 waf configure """ from __future__ import with_statement try: from xml.sax import make_parser from xml.sax.handler import ContentHandler except ImportError: has_xml = False ContentHandler = object else: has_xml = True import os, sys, re from waflib.Tools import cxx from waflib import Build, Task, Utils, Options, Errors, Context from waflib.TaskGen import feature, after_method, extension, before_method from waflib.Configure import conf from waflib import Logs MOC_H = ['.h', '.hpp', '.hxx', '.hh'] """ File extensions associated to .moc files """ EXT_RCC = ['.qrc'] """ File extension for the resource (.qrc) files """ EXT_UI = ['.ui'] """ File extension for the user interface (.ui) files """ EXT_QT5 = ['.cpp', '.cc', '.cxx', '.C'] """ File extensions of C++ files that may require a .moc processing """ class qxx(Task.classes['cxx']): """ Each C++ file can have zero or several .moc files to create. They are known only when the files are scanned (preprocessor) To avoid scanning the c++ files each time (parsing C/C++), the results are retrieved from the task cache (bld.node_deps/bld.raw_deps). The moc tasks are also created *dynamically* during the build. """ def __init__(self, *k, **kw): Task.Task.__init__(self, *k, **kw) self.moc_done = 0 def runnable_status(self): """ Compute the task signature to make sure the scanner was executed. Create the moc tasks by using :py:meth:`waflib.Tools.qt5.qxx.add_moc_tasks` (if necessary), then postpone the task execution (there is no need to recompute the task signature). """ if self.moc_done: return Task.Task.runnable_status(self) else: for t in self.run_after: if not t.hasrun: return Task.ASK_LATER self.add_moc_tasks() return Task.Task.runnable_status(self) def create_moc_task(self, h_node, m_node): """ If several libraries use the same classes, it is possible that moc will run several times (Issue 1318) It is not possible to change the file names, but we can assume that the moc transformation will be identical, and the moc tasks can be shared in a global cache. 
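	The cache is keyed by the header node; conceptually (an illustrative sketch)::

		moc_cache = bld.moc_cache   # {h_node: moc_task}
		moc_cache[h_node] = tsk     # reused by every target including that header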
""" try: moc_cache = self.generator.bld.moc_cache except AttributeError: moc_cache = self.generator.bld.moc_cache = {} try: return moc_cache[h_node] except KeyError: tsk = moc_cache[h_node] = Task.classes['moc'](env=self.env, generator=self.generator) tsk.set_inputs(h_node) tsk.set_outputs(m_node) tsk.env.append_unique('MOC_FLAGS', '-i') if self.generator: self.generator.tasks.append(tsk) # direct injection in the build phase (safe because called from the main thread) gen = self.generator.bld.producer gen.outstanding.append(tsk) gen.total += 1 return tsk else: # remove the signature, it must be recomputed with the moc task delattr(self, 'cache_sig') def add_moc_tasks(self): """ Creates moc tasks by looking in the list of file dependencies ``bld.raw_deps[self.uid()]`` """ node = self.inputs[0] bld = self.generator.bld # skip on uninstall due to generated files if bld.is_install == Build.UNINSTALL: return try: # compute the signature once to know if there is a moc file to create self.signature() except KeyError: # the moc file may be referenced somewhere else pass else: # remove the signature, it must be recomputed with the moc task delattr(self, 'cache_sig') include_nodes = [node.parent] + self.generator.includes_nodes moctasks = [] mocfiles = set() for d in bld.raw_deps.get(self.uid(), []): if not d.endswith('.moc'): continue # process that base.moc only once if d in mocfiles: continue mocfiles.add(d) # find the source associated with the moc file h_node = None base2 = d[:-4] # foo.moc from foo.cpp prefix = node.name[:node.name.rfind('.')] if base2 == prefix: h_node = node else: # this deviates from the standard # if bar.cpp includes foo.moc, then assume it is from foo.h for x in include_nodes: for e in MOC_H: h_node = x.find_node(base2 + e) if h_node: break else: continue break if h_node: m_node = h_node.change_ext('.moc') else: raise Errors.WafError('No source found for %r which is a moc file' % d) # create the moc task task = self.create_moc_task(h_node, m_node) moctasks.append(task) # simple scheduler dependency: run the moc task before others self.run_after.update(set(moctasks)) self.moc_done = 1 class trans_update(Task.Task): """Updates a .ts files from a list of C++ files""" run_str = '${QT_LUPDATE} ${SRC} -ts ${TGT}' color = 'BLUE' class XMLHandler(ContentHandler): """ Parses ``.qrc`` files """ def __init__(self): ContentHandler.__init__(self) self.buf = [] self.files = [] def startElement(self, name, attrs): if name == 'file': self.buf = [] def endElement(self, name): if name == 'file': self.files.append(str(''.join(self.buf))) def characters(self, cars): self.buf.append(cars) @extension(*EXT_RCC) def create_rcc_task(self, node): "Creates rcc and cxx tasks for ``.qrc`` files" rcnode = node.change_ext('_rc.%d.cpp' % self.idx) self.create_task('rcc', node, rcnode) cpptask = self.create_task('cxx', rcnode, rcnode.change_ext('.o')) try: self.compiled_tasks.append(cpptask) except AttributeError: self.compiled_tasks = [cpptask] return cpptask @extension(*EXT_UI) def create_uic_task(self, node): "Create uic tasks for user interface ``.ui`` definition files" """ If UIC file is used in more than one bld, we would have a conflict in parallel execution It is not possible to change the file names (like .self.idx. as for objects) as they have to be referenced by the source file, but we can assume that the transformation will be identical and the tasks can be shared in a global cache. 
""" try: uic_cache = self.bld.uic_cache except AttributeError: uic_cache = self.bld.uic_cache = {} if node not in uic_cache: uictask = uic_cache[node] = self.create_task('ui5', node) uictask.outputs = [node.parent.find_or_declare(self.env.ui_PATTERN % node.name[:-3])] @extension('.ts') def add_lang(self, node): """Adds all the .ts file into ``self.lang``""" self.lang = self.to_list(getattr(self, 'lang', [])) + [node] @feature('qt5') @before_method('process_source') def process_mocs(self): """ Processes MOC files included in headers:: def build(bld): bld.program(features='qt5', source='main.cpp', target='app', use='QT5CORE', moc='foo.h') The build will run moc on foo.h to create moc_foo.n.cpp. The number in the file name is provided to avoid name clashes when the same headers are used by several targets. """ lst = self.to_nodes(getattr(self, 'moc', [])) self.source = self.to_list(getattr(self, 'source', [])) for x in lst: prefix = x.name[:x.name.rfind('.')] # foo.h -> foo moc_target = 'moc_%s.%d.cpp' % (prefix, self.idx) moc_node = x.parent.find_or_declare(moc_target) self.source.append(moc_node) self.create_task('moc', x, moc_node) @feature('qt5') @after_method('apply_link') def apply_qt5(self): """ Adds MOC_FLAGS which may be necessary for moc:: def build(bld): bld.program(features='qt5', source='main.cpp', target='app', use='QT5CORE') The additional parameters are: :param lang: list of translation files (\\*.ts) to process :type lang: list of :py:class:`waflib.Node.Node` or string without the .ts extension :param update: whether to process the C++ files to update the \\*.ts files (use **waf --translate**) :type update: bool :param langname: if given, transform the \\*.ts files into a .qrc files to include in the binary file :type langname: :py:class:`waflib.Node.Node` or string without the .qrc extension """ if getattr(self, 'lang', None): qmtasks = [] for x in self.to_list(self.lang): if isinstance(x, str): x = self.path.find_resource(x + '.ts') qmtasks.append(self.create_task('ts2qm', x, x.change_ext('.%d.qm' % self.idx))) if getattr(self, 'update', None) and Options.options.trans_qt5: cxxnodes = [a.inputs[0] for a in self.compiled_tasks] + [ a.inputs[0] for a in self.tasks if a.inputs and a.inputs[0].name.endswith('.ui')] for x in qmtasks: self.create_task('trans_update', cxxnodes, x.inputs) if getattr(self, 'langname', None): qmnodes = [x.outputs[0] for x in qmtasks] rcnode = self.langname if isinstance(rcnode, str): rcnode = self.path.find_or_declare(rcnode + ('.%d.qrc' % self.idx)) t = self.create_task('qm2rcc', qmnodes, rcnode) k = create_rcc_task(self, t.outputs[0]) self.link_task.inputs.append(k.outputs[0]) lst = [] for flag in self.to_list(self.env.CXXFLAGS): if len(flag) < 2: continue f = flag[0:2] if f in ('-D', '-I', '/D', '/I'): if (f[0] == '/'): lst.append('-' + flag[1:]) else: lst.append(flag) self.env.append_value('MOC_FLAGS', lst) @extension(*EXT_QT5) def cxx_hook(self, node): """ Re-maps C++ file extensions to the :py:class:`waflib.Tools.qt5.qxx` task. 
""" return self.create_compiled_task('qxx', node) class rcc(Task.Task): """ Processes ``.qrc`` files """ color = 'BLUE' run_str = '${QT_RCC} -name ${tsk.rcname()} ${SRC[0].abspath()} ${RCC_ST} -o ${TGT}' ext_out = ['.h'] def rcname(self): return os.path.splitext(self.inputs[0].name)[0] def scan(self): """Parse the *.qrc* files""" if not has_xml: Logs.error('No xml.sax support was found, rcc dependencies will be incomplete!') return ([], []) parser = make_parser() curHandler = XMLHandler() parser.setContentHandler(curHandler) with open(self.inputs[0].abspath(), 'r') as f: parser.parse(f) nodes = [] names = [] root = self.inputs[0].parent for x in curHandler.files: nd = root.find_resource(x) if nd: nodes.append(nd) else: names.append(x) return (nodes, names) def quote_flag(self, x): """ Override Task.quote_flag. QT parses the argument files differently than cl.exe and link.exe :param x: flag :type x: string :return: quoted flag :rtype: string """ return x class moc(Task.Task): """ Creates ``.moc`` files """ color = 'BLUE' run_str = '${QT_MOC} ${MOC_FLAGS} ${MOCCPPPATH_ST:INCPATHS} ${MOCDEFINES_ST:DEFINES} ${SRC} ${MOC_ST} ${TGT}' def quote_flag(self, x): """ Override Task.quote_flag. QT parses the argument files differently than cl.exe and link.exe :param x: flag :type x: string :return: quoted flag :rtype: string """ return x class ui5(Task.Task): """ Processes ``.ui`` files """ color = 'BLUE' run_str = '${QT_UIC} ${SRC} -o ${TGT}' ext_out = ['.h'] class ts2qm(Task.Task): """ Generates ``.qm`` files from ``.ts`` files """ color = 'BLUE' run_str = '${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}' class qm2rcc(Task.Task): """ Generates ``.qrc`` files from ``.qm`` files """ color = 'BLUE' after = 'ts2qm' def run(self): """Create a qrc file including the inputs""" txt = '\n'.join(['%s' % k.path_from(self.outputs[0].parent) for k in self.inputs]) code = '\n\n%s\n\n' % txt self.outputs[0].write(code) def configure(self): """ Besides the configuration options, the environment variable QT5_ROOT may be used to give the location of the qt5 libraries (absolute path). The detection uses the program ``pkg-config`` through :py:func:`waflib.Tools.config_c.check_cfg` """ self.find_qt5_binaries() self.set_qt5_libs_dir() self.set_qt5_libs_to_check() self.set_qt5_defines() self.find_qt5_libraries() self.add_qt5_rpath() self.simplify_qt5_libs() # warn about this during the configuration too if not has_xml: Logs.error('No xml.sax support was found, rcc dependencies will be incomplete!') if 'COMPILER_CXX' not in self.env: self.fatal('No CXX compiler defined: did you forget to configure compiler_cxx first?') # Qt5 may be compiled with '-reduce-relocations' which requires dependent programs to have -fPIE or -fPIC? 
frag = '#include <QApplication>\nint main(int argc, char **argv) {return 0;}\n' uses = 'QT5CORE QT5WIDGETS QT5GUI' for flag in [[], '-fPIE', '-fPIC', '-std=c++11', ['-std=c++11', '-fPIE'], ['-std=c++11', '-fPIC']]: msg = 'See if Qt files compile ' if flag: msg += 'with %s' % flag try: self.check(features='qt5 cxx', use=uses, uselib_store='qt5', cxxflags=flag, fragment=frag, msg=msg) except self.errors.ConfigurationError: pass else: break else: self.fatal('Could not build a simple Qt application') # FreeBSD does not add /usr/local/lib and the pkg-config files do not provide it either :-/ if Utils.unversioned_sys_platform() == 'freebsd': frag = '#include <QApplication>\nint main(int argc, char **argv) { QApplication app(argc, argv); return NULL != (void*) (&app);}\n' try: self.check(features='qt5 cxx cxxprogram', use=uses, fragment=frag, msg='Can we link Qt programs on FreeBSD directly?') except self.errors.ConfigurationError: self.check(features='qt5 cxx cxxprogram', use=uses, uselib_store='qt5', libpath='/usr/local/lib', fragment=frag, msg='Is /usr/local/lib required?') @conf def find_qt5_binaries(self): """ Detects Qt programs such as qmake, moc, uic, lrelease """ env = self.env opt = Options.options qtdir = getattr(opt, 'qtdir', '') qtbin = getattr(opt, 'qtbin', '') paths = [] if qtdir: qtbin = os.path.join(qtdir, 'bin') # the qt directory has been given from QT5_ROOT - deduce the qt binary path if not qtdir: qtdir = self.environ.get('QT5_ROOT', '') qtbin = self.environ.get('QT5_BIN') or os.path.join(qtdir, 'bin') if qtbin: paths = [qtbin] # no qtdir, look in the path and in /usr/local/Trolltech if not qtdir: paths = self.environ.get('PATH', '').split(os.pathsep) paths.extend(['/usr/share/qt5/bin', '/usr/local/lib/qt5/bin']) try: lst = Utils.listdir('/usr/local/Trolltech/') except OSError: pass else: if lst: lst.sort() lst.reverse() # keep the highest version qtdir = '/usr/local/Trolltech/%s/' % lst[0] qtbin = os.path.join(qtdir, 'bin') paths.append(qtbin) # at the end, try to find qmake in the paths given # keep the one with the highest version cand = None prev_ver = ['5', '0', '0'] for qmk in ('qmake-qt5', 'qmake5', 'qmake'): try: qmake = self.find_program(qmk, path_list=paths) except self.errors.ConfigurationError: pass else: try: version = self.cmd_and_log(qmake + ['-query', 'QT_VERSION']).strip() except self.errors.WafError: pass else: if version: new_ver = version.split('.') if new_ver > prev_ver: cand = qmake prev_ver = new_ver # qmake could not be found easily, rely on qtchooser if not cand: try: self.find_program('qtchooser') except self.errors.ConfigurationError: pass else: cmd = self.env.QTCHOOSER + ['-qt=5', '-run-tool=qmake'] try: version = self.cmd_and_log(cmd + ['-query', 'QT_VERSION']) except self.errors.WafError: pass else: cand = cmd if cand: self.env.QMAKE = cand else: self.fatal('Could not find qmake for qt5') self.env.QT_HOST_BINS = qtbin = self.cmd_and_log(self.env.QMAKE + ['-query', 'QT_HOST_BINS']).strip() paths.insert(0, qtbin) def find_bin(lst, var): if var in env: return for f in lst: try: ret = self.find_program(f, path_list=paths) except self.errors.ConfigurationError: pass else: env[var]=ret break find_bin(['uic-qt5', 'uic'], 'QT_UIC') if not env.QT_UIC: self.fatal('cannot find the uic compiler for qt5') self.start_msg('Checking for uic version') uicver = self.cmd_and_log(env.QT_UIC + ['-version'], output=Context.BOTH) uicver = ''.join(uicver).strip() uicver = uicver.replace('Qt User Interface Compiler ','').replace('User Interface Compiler for Qt', '') self.end_msg(uicver) if
uicver.find(' 3.') != -1 or uicver.find(' 4.') != -1: self.fatal('this uic compiler is for qt3 or qt4, add uic for qt5 to your path') find_bin(['moc-qt5', 'moc'], 'QT_MOC') find_bin(['rcc-qt5', 'rcc'], 'QT_RCC') find_bin(['lrelease-qt5', 'lrelease'], 'QT_LRELEASE') find_bin(['lupdate-qt5', 'lupdate'], 'QT_LUPDATE') env.UIC_ST = '%s -o %s' env.MOC_ST = '-o' env.ui_PATTERN = 'ui_%s.h' env.QT_LRELEASE_FLAGS = ['-silent'] env.MOCCPPPATH_ST = '-I%s' env.MOCDEFINES_ST = '-D%s' @conf def set_qt5_libs_dir(self): env = self.env qtlibs = getattr(Options.options, 'qtlibs', None) or self.environ.get('QT5_LIBDIR') if not qtlibs: try: qtlibs = self.cmd_and_log(env.QMAKE + ['-query', 'QT_INSTALL_LIBS']).strip() except Errors.WafError: qtdir = self.cmd_and_log(env.QMAKE + ['-query', 'QT_INSTALL_PREFIX']).strip() qtlibs = os.path.join(qtdir, 'lib') self.msg('Found the Qt5 libraries in', qtlibs) env.QTLIBS = qtlibs @conf def find_single_qt5_lib(self, name, uselib, qtlibs, qtincludes, force_static): env = self.env if force_static: exts = ('.a', '.lib') prefix = 'STLIB' else: exts = ('.so', '.lib') prefix = 'LIB' def lib_names(): for x in exts: for k in ('', '5') if Utils.is_win32 else ['']: for p in ('lib', ''): yield (p, name, k, x) for tup in lib_names(): k = ''.join(tup) path = os.path.join(qtlibs, k) if os.path.exists(path): if env.DEST_OS == 'win32': libval = ''.join(tup[:-1]) else: libval = name env.append_unique(prefix + '_' + uselib, libval) env.append_unique('%sPATH_%s' % (prefix, uselib), qtlibs) env.append_unique('INCLUDES_' + uselib, qtincludes) env.append_unique('INCLUDES_' + uselib, os.path.join(qtincludes, name.replace('Qt5', 'Qt'))) return k return False @conf def find_qt5_libraries(self): env = self.env qtincludes = self.environ.get('QT5_INCLUDES') or self.cmd_and_log(env.QMAKE + ['-query', 'QT_INSTALL_HEADERS']).strip() force_static = self.environ.get('QT5_FORCE_STATIC') try: if self.environ.get('QT5_XCOMPILE'): self.fatal('QT5_XCOMPILE Disables pkg-config detection') self.check_cfg(atleast_pkgconfig_version='0.1') except self.errors.ConfigurationError: for i in self.qt5_vars: uselib = i.upper() if Utils.unversioned_sys_platform() == 'darwin': # Since at least qt 4.7.3 each library locates in separate directory fwk = i.replace('Qt5', 'Qt') frameworkName = fwk + '.framework' qtDynamicLib = os.path.join(env.QTLIBS, frameworkName, fwk) if os.path.exists(qtDynamicLib): env.append_unique('FRAMEWORK_' + uselib, fwk) env.append_unique('FRAMEWORKPATH_' + uselib, env.QTLIBS) self.msg('Checking for %s' % i, qtDynamicLib, 'GREEN') else: self.msg('Checking for %s' % i, False, 'YELLOW') env.append_unique('INCLUDES_' + uselib, os.path.join(env.QTLIBS, frameworkName, 'Headers')) else: ret = self.find_single_qt5_lib(i, uselib, env.QTLIBS, qtincludes, force_static) if not force_static and not ret: ret = self.find_single_qt5_lib(i, uselib, env.QTLIBS, qtincludes, True) self.msg('Checking for %s' % i, ret, 'GREEN' if ret else 'YELLOW') else: path = '%s:%s:%s/pkgconfig:/usr/lib/qt5/lib/pkgconfig:/opt/qt5/lib/pkgconfig:/usr/lib/qt5/lib:/opt/qt5/lib' % ( self.environ.get('PKG_CONFIG_PATH', ''), env.QTLIBS, env.QTLIBS) for i in self.qt5_vars: self.check_cfg(package=i, args='--cflags --libs', mandatory=False, force_static=force_static, pkg_config_path=path) @conf def simplify_qt5_libs(self): """ Since library paths make really long command-lines, and since everything depends on qtcore, remove the qtcore ones from qtgui, etc """ env = self.env def process_lib(vars_, coreval): for d in vars_: var = d.upper() if var 
== 'QTCORE': continue value = env['LIBPATH_'+var] if value: core = env[coreval] accu = [] for lib in value: if lib in core: continue accu.append(lib) env['LIBPATH_'+var] = accu process_lib(self.qt5_vars, 'LIBPATH_QTCORE') @conf def add_qt5_rpath(self): """ Defines rpath entries for Qt libraries """ env = self.env if getattr(Options.options, 'want_rpath', False): def process_rpath(vars_, coreval): for d in vars_: var = d.upper() value = env['LIBPATH_' + var] if value: core = env[coreval] accu = [] for lib in value: if var != 'QTCORE': if lib in core: continue accu.append('-Wl,--rpath='+lib) env['RPATH_' + var] = accu process_rpath(self.qt5_vars, 'LIBPATH_QTCORE') @conf def set_qt5_libs_to_check(self): self.qt5_vars = Utils.to_list(getattr(self, 'qt5_vars', [])) if not self.qt5_vars: dirlst = Utils.listdir(self.env.QTLIBS) pat = self.env.cxxshlib_PATTERN if Utils.is_win32: pat = pat.replace('.dll', '.lib') if self.environ.get('QT5_FORCE_STATIC'): pat = self.env.cxxstlib_PATTERN if Utils.unversioned_sys_platform() == 'darwin': pat = r"%s\.framework" re_qt = re.compile(pat%'Qt5?(?P<name>.*)'+'$') for x in dirlst: m = re_qt.match(x) if m: self.qt5_vars.append("Qt5%s" % m.group('name')) if not self.qt5_vars: self.fatal('cannot find any Qt5 library (%r)' % self.env.QTLIBS) qtextralibs = getattr(Options.options, 'qtextralibs', None) if qtextralibs: self.qt5_vars.extend(qtextralibs.split(',')) @conf def set_qt5_defines(self): if sys.platform != 'win32': return for x in self.qt5_vars: y=x.replace('Qt5', 'Qt')[2:].upper() self.env.append_unique('DEFINES_%s' % x.upper(), 'QT_%s_LIB' % y) def options(opt): """ Command-line options """ opt.add_option('--want-rpath', action='store_true', default=False, dest='want_rpath', help='enable the rpath for qt libraries') for i in 'qtdir qtbin qtlibs'.split(): opt.add_option('--'+i, type='string', default='', dest=i) opt.add_option('--translate', action='store_true', help='collect translation strings', dest='trans_qt5', default=False) opt.add_option('--qtextralibs', type='string', default='', dest='qtextralibs', help='additional qt libraries on the system to add to default ones, comma separated') tdb-1.4.2/third_party/waf/waflib/Tools/ruby.py0000660000000000000000000001271313444661622021244 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # daniel.svensson at purplescout.se 2008 # Thomas Nagy 2016-2018 (ita) """ Support for Ruby extensions.
A C/C++ compiler is required:: def options(opt): opt.load('compiler_c ruby') def configure(conf): conf.load('compiler_c ruby') conf.check_ruby_version((1,8,0)) conf.check_ruby_ext_devel() conf.check_ruby_module('libxml') def build(bld): bld( features = 'c cshlib rubyext', source = 'rb_mytest.c', target = 'mytest_ext', install_path = '${ARCHDIR_RUBY}') bld.install_files('${LIBDIR_RUBY}', 'Mytest.rb') """ import os from waflib import Errors, Options, Task, Utils from waflib.TaskGen import before_method, feature, extension from waflib.Configure import conf @feature('rubyext') @before_method('apply_incpaths', 'process_source', 'apply_bundle', 'apply_link') def init_rubyext(self): """ Add required variables for ruby extensions """ self.install_path = '${ARCHDIR_RUBY}' self.uselib = self.to_list(getattr(self, 'uselib', '')) if not 'RUBY' in self.uselib: self.uselib.append('RUBY') if not 'RUBYEXT' in self.uselib: self.uselib.append('RUBYEXT') @feature('rubyext') @before_method('apply_link', 'propagate_uselib_vars') def apply_ruby_so_name(self): """ Strip the *lib* prefix from ruby extensions """ self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.rubyext_PATTERN @conf def check_ruby_version(self, minver=()): """ Checks if ruby is installed. If installed the variable RUBY will be set in environment. The ruby binary can be overridden by ``--with-ruby-binary`` command-line option. """ ruby = self.find_program('ruby', var='RUBY', value=Options.options.rubybinary) try: version = self.cmd_and_log(ruby + ['-e', 'puts defined?(VERSION) ? VERSION : RUBY_VERSION']).strip() except Errors.WafError: self.fatal('could not determine ruby version') self.env.RUBY_VERSION = version try: ver = tuple(map(int, version.split('.'))) except Errors.WafError: self.fatal('unsupported ruby version %r' % version) cver = '' if minver: cver = '> ' + '.'.join(str(x) for x in minver) if ver < minver: self.fatal('ruby is too old %r' % ver) self.msg('Checking for ruby version %s' % cver, version) @conf def check_ruby_ext_devel(self): """ Check if a ruby extension can be created """ if not self.env.RUBY: self.fatal('ruby detection is required first') if not self.env.CC_NAME and not self.env.CXX_NAME: self.fatal('load a c/c++ compiler first') version = tuple(map(int, self.env.RUBY_VERSION.split("."))) def read_out(cmd): return Utils.to_list(self.cmd_and_log(self.env.RUBY + ['-rrbconfig', '-e', cmd])) def read_config(key): return read_out('puts RbConfig::CONFIG[%r]' % key) cpppath = archdir = read_config('archdir') if version >= (1, 9, 0): ruby_hdrdir = read_config('rubyhdrdir') cpppath += ruby_hdrdir if version >= (2, 0, 0): cpppath += read_config('rubyarchhdrdir') cpppath += [os.path.join(ruby_hdrdir[0], read_config('arch')[0])] self.check(header_name='ruby.h', includes=cpppath, errmsg='could not find ruby header file', link_header_test=False) self.env.LIBPATH_RUBYEXT = read_config('libdir') self.env.LIBPATH_RUBYEXT += archdir self.env.INCLUDES_RUBYEXT = cpppath self.env.CFLAGS_RUBYEXT = read_config('CCDLFLAGS') self.env.rubyext_PATTERN = '%s.' + read_config('DLEXT')[0] # ok this is really stupid, but the command and flags are combined. # so we try to find the first argument... 
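	# For instance, RbConfig::CONFIG['LDSHARED'] may hold a combined value such as
	# "gcc -shared" (an illustrative value): the leading command word(s) are dropped
	# below so that only the actual flags remain.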
flags = read_config('LDSHARED') while flags and flags[0][0] != '-': flags = flags[1:] # we also want to strip out the deprecated ppc flags if len(flags) > 1 and flags[1] == "ppc": flags = flags[2:] self.env.LINKFLAGS_RUBYEXT = flags self.env.LINKFLAGS_RUBYEXT += read_config('LIBS') self.env.LINKFLAGS_RUBYEXT += read_config('LIBRUBYARG_SHARED') if Options.options.rubyarchdir: self.env.ARCHDIR_RUBY = Options.options.rubyarchdir else: self.env.ARCHDIR_RUBY = read_config('sitearchdir')[0] if Options.options.rubylibdir: self.env.LIBDIR_RUBY = Options.options.rubylibdir else: self.env.LIBDIR_RUBY = read_config('sitelibdir')[0] @conf def check_ruby_module(self, module_name): """ Check if the selected ruby interpreter can require the given ruby module:: def configure(conf): conf.check_ruby_module('libxml') :param module_name: module :type module_name: string """ self.start_msg('Ruby module %s' % module_name) try: self.cmd_and_log(self.env.RUBY + ['-e', 'require \'%s\';puts 1' % module_name]) except Errors.WafError: self.end_msg(False) self.fatal('Could not find the ruby module %r' % module_name) self.end_msg(True) @extension('.rb') def process(self, node): return self.create_task('run_ruby', node) class run_ruby(Task.Task): """ Task to run ruby files detected by file extension .rb:: def options(opt): opt.load('ruby') def configure(ctx): ctx.check_ruby_version() def build(bld): bld.env.RBFLAGS = '-e puts "hello world"' bld(source='a_ruby_file.rb') """ run_str = '${RUBY} ${RBFLAGS} -I ${SRC[0].parent.abspath()} ${SRC}' def options(opt): """ Add the ``--with-ruby-archdir``, ``--with-ruby-libdir`` and ``--with-ruby-binary`` options """ opt.add_option('--with-ruby-archdir', type='string', dest='rubyarchdir', help='Specify directory where to install arch specific files') opt.add_option('--with-ruby-libdir', type='string', dest='rubylibdir', help='Specify alternate ruby library path') opt.add_option('--with-ruby-binary', type='string', dest='rubybinary', help='Specify alternate ruby binary') tdb-1.4.2/third_party/waf/waflib/Tools/suncc.py0000660000000000000000000000272313444661622021376 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) # Ralf Habacker, 2006 (rh) from waflib import Errors from waflib.Tools import ccroot, ar from waflib.Configure import conf @conf def find_scc(conf): """ Detects the Sun C compiler """ v = conf.env cc = conf.find_program('cc', var='CC') try: conf.cmd_and_log(cc + ['-flags']) except Errors.WafError: conf.fatal('%r is not a Sun compiler' % cc) v.CC_NAME = 'sun' conf.get_suncc_version(cc) @conf def scc_common_flags(conf): """ Flags required for executing the sun C compiler """ v = conf.env v.CC_SRC_F = [] v.CC_TGT_F = ['-c', '-o', ''] if not v.LINK_CC: v.LINK_CC = v.CC v.CCLNK_SRC_F = '' v.CCLNK_TGT_F = ['-o', ''] v.CPPPATH_ST = '-I%s' v.DEFINES_ST = '-D%s' v.LIB_ST = '-l%s' # template for adding libs v.LIBPATH_ST = '-L%s' # template for adding libpaths v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-L%s' v.SONAME_ST = '-Wl,-h,%s' v.SHLIB_MARKER = '-Bdynamic' v.STLIB_MARKER = '-Bstatic' v.cprogram_PATTERN = '%s' v.CFLAGS_cshlib = ['-xcode=pic32', '-DPIC'] v.LINKFLAGS_cshlib = ['-G'] v.cshlib_PATTERN = 'lib%s.so' v.LINKFLAGS_cstlib = ['-Bstatic'] v.cstlib_PATTERN = 'lib%s.a' def configure(conf): conf.find_scc() conf.find_ar() conf.scc_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() tdb-1.4.2/third_party/waf/waflib/Tools/suncxx.py0000660000000000000000000000274713444661622021621 0ustar 
rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) # Ralf Habacker, 2006 (rh) from waflib import Errors from waflib.Tools import ccroot, ar from waflib.Configure import conf @conf def find_sxx(conf): """ Detects the sun C++ compiler """ v = conf.env cc = conf.find_program(['CC', 'c++'], var='CXX') try: conf.cmd_and_log(cc + ['-flags']) except Errors.WafError: conf.fatal('%r is not a Sun compiler' % cc) v.CXX_NAME = 'sun' conf.get_suncc_version(cc) @conf def sxx_common_flags(conf): """ Flags required for executing the sun C++ compiler """ v = conf.env v.CXX_SRC_F = [] v.CXX_TGT_F = ['-c', '-o', ''] if not v.LINK_CXX: v.LINK_CXX = v.CXX v.CXXLNK_SRC_F = [] v.CXXLNK_TGT_F = ['-o', ''] v.CPPPATH_ST = '-I%s' v.DEFINES_ST = '-D%s' v.LIB_ST = '-l%s' # template for adding libs v.LIBPATH_ST = '-L%s' # template for adding libpaths v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-L%s' v.SONAME_ST = '-Wl,-h,%s' v.SHLIB_MARKER = '-Bdynamic' v.STLIB_MARKER = '-Bstatic' v.cxxprogram_PATTERN = '%s' v.CXXFLAGS_cxxshlib = ['-xcode=pic32', '-DPIC'] v.LINKFLAGS_cxxshlib = ['-G'] v.cxxshlib_PATTERN = 'lib%s.so' v.LINKFLAGS_cxxstlib = ['-Bstatic'] v.cxxstlib_PATTERN = 'lib%s.a' def configure(conf): conf.find_sxx() conf.find_ar() conf.sxx_common_flags() conf.cxx_load_tools() conf.cxx_add_flags() conf.link_add_flags() tdb-1.4.2/third_party/waf/waflib/Tools/tex.py0000660000000000000000000003573113444661622021070 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) """ TeX/LaTeX/PDFLaTeX/XeLaTeX support Example:: def configure(conf): conf.load('tex') if not conf.env.LATEX: conf.fatal('The program LaTeX is required') def build(bld): bld( features = 'tex', type = 'latex', # pdflatex or xelatex source = 'document.ltx', # mandatory, the source outs = 'ps', # 'pdf' or 'ps pdf' deps = 'crossreferencing.lst', # to give dependencies directly prompt = 1, # 0 for the batch mode ) Notes: - To configure with a special program, use:: $ PDFLATEX=luatex waf configure - This tool does not use the target attribute of the task generator (``bld(target=...)``); the target file name is built from the source base name and the output type(s) """ import os, re from waflib import Utils, Task, Errors, Logs, Node from waflib.TaskGen import feature, before_method re_bibunit = re.compile(r'\\(?P<type>putbib)\[(?P<file>[^\[\]]*)\]',re.M) def bibunitscan(self): """ Parses TeX inputs and tries to find the *bibunit* file dependencies :return: list of bibunit files :rtype: list of :py:class:`waflib.Node.Node` """ node = self.inputs[0] nodes = [] if not node: return nodes code = node.read() for match in re_bibunit.finditer(code): path = match.group('file') if path: found = None for k in ('', '.bib'): # add another loop for the tex include paths?
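				# e.g. a \putbib[refs] directive (hypothetical input) is looked up
				# both as 'refs' and as 'refs.bib' next to the including document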
Logs.debug('tex: trying %s%s', path, k) fi = node.parent.find_resource(path + k) if fi: found = True nodes.append(fi) # no break if not found: Logs.debug('tex: could not find %s', path) Logs.debug('tex: found the following bibunit files: %s', nodes) return nodes exts_deps_tex = ['', '.ltx', '.tex', '.bib', '.pdf', '.png', '.eps', '.ps', '.sty'] """List of typical file extensions included in latex files""" exts_tex = ['.ltx', '.tex'] """List of typical file extensions that contain latex""" re_tex = re.compile(r'\\(?P<type>usepackage|RequirePackage|include|bibliography([^\[\]{}]*)|putbib|includegraphics|input|import|bringin|lstinputlisting)(\[[^\[\]]*\])?{(?P<file>[^{}]*)}',re.M) """Regexp for expressions that may include latex files""" g_bibtex_re = re.compile('bibdata', re.M) """Regexp for bibtex files""" g_glossaries_re = re.compile('\\@newglossary', re.M) """Regexp for expressions that create glossaries""" class tex(Task.Task): """ Compiles a tex/latex file. .. inheritance-diagram:: waflib.Tools.tex.latex waflib.Tools.tex.xelatex waflib.Tools.tex.pdflatex """ bibtex_fun, _ = Task.compile_fun('${BIBTEX} ${BIBTEXFLAGS} ${SRCFILE}', shell=False) bibtex_fun.__doc__ = """ Execute the program **bibtex** """ makeindex_fun, _ = Task.compile_fun('${MAKEINDEX} ${MAKEINDEXFLAGS} ${SRCFILE}', shell=False) makeindex_fun.__doc__ = """ Execute the program **makeindex** """ makeglossaries_fun, _ = Task.compile_fun('${MAKEGLOSSARIES} ${SRCFILE}', shell=False) makeglossaries_fun.__doc__ = """ Execute the program **makeglossaries** """ def exec_command(self, cmd, **kw): """ Executes TeX commands without buffering (latex may prompt for inputs) :return: the return code :rtype: int """ if self.env.PROMPT_LATEX: # capture the outputs in configuration tests kw['stdout'] = kw['stderr'] = None return super(tex, self).exec_command(cmd, **kw) def scan_aux(self, node): """ Recursive regex-based scanner that finds included auxiliary files. """ nodes = [node] re_aux = re.compile(r'\\@input{(?P<file>[^{}]*)}', re.M) def parse_node(node): code = node.read() for match in re_aux.finditer(code): path = match.group('file') found = node.parent.find_or_declare(path) if found and found not in nodes: Logs.debug('tex: found aux node %r', found) nodes.append(found) parse_node(found) parse_node(node) return nodes def scan(self): """ Recursive regex-based scanner that finds latex dependencies.
It uses :py:attr:`waflib.Tools.tex.re_tex`. Depending on your needs you might want: * to change re_tex:: from waflib.Tools import tex tex.re_tex = myregex * or to change the method scan from the latex tasks:: from waflib.Task import classes classes['latex'].scan = myscanfunction """ node = self.inputs[0] nodes = [] names = [] seen = [] if not node: return (nodes, names) def parse_node(node): if node in seen: return seen.append(node) code = node.read() for match in re_tex.finditer(code): multibib = match.group('type') if multibib and multibib.startswith('bibliography'): multibib = multibib[len('bibliography'):] if multibib.startswith('style'): continue else: multibib = None for path in match.group('file').split(','): if path: add_name = True found = None for k in exts_deps_tex: # issue 1067, scan in all texinputs folders for up in self.texinputs_nodes: Logs.debug('tex: trying %s%s', path, k) found = up.find_resource(path + k) if found: break for tsk in self.generator.tasks: if not found or found in tsk.outputs: break else: nodes.append(found) add_name = False for ext in exts_tex: if found.name.endswith(ext): parse_node(found) break # multibib stuff if found and multibib and found.name.endswith('.bib'): try: self.multibibs.append(found) except AttributeError: self.multibibs = [found] # no break, people are crazy if add_name: names.append(path) parse_node(node) for x in nodes: x.parent.get_bld().mkdir() Logs.debug("tex: found the following : %s and names %s", nodes, names) return (nodes, names) def check_status(self, msg, retcode): """ Checks an exit status and raises an error with a particular message :param msg: message to display if the code is non-zero :type msg: string :param retcode: condition :type retcode: boolean """ if retcode != 0: raise Errors.WafError('%r command exit status %r' % (msg, retcode)) def info(self, *k, **kw): try: info = self.generator.bld.conf.logger.info except AttributeError: info = Logs.info info(*k, **kw) def bibfile(self): """ Parses *.aux* files to find bibfiles to process. If present, execute :py:meth:`waflib.Tools.tex.tex.bibtex_fun` """ for aux_node in self.aux_nodes: try: ct = aux_node.read() except EnvironmentError: Logs.error('Error reading %s', aux_node.abspath()) continue if g_bibtex_re.findall(ct): self.info('calling bibtex') self.env.env = {} self.env.env.update(os.environ) self.env.env.update({'BIBINPUTS': self.texinputs(), 'BSTINPUTS': self.texinputs()}) self.env.SRCFILE = aux_node.name[:-4] self.check_status('error when calling bibtex', self.bibtex_fun()) for node in getattr(self, 'multibibs', []): self.env.env = {} self.env.env.update(os.environ) self.env.env.update({'BIBINPUTS': self.texinputs(), 'BSTINPUTS': self.texinputs()}) self.env.SRCFILE = node.name[:-4] self.check_status('error when calling bibtex', self.bibtex_fun()) def bibunits(self): """ Parses *.aux* file to find bibunit files. If there are bibunit files, runs :py:meth:`waflib.Tools.tex.tex.bibtex_fun`. """ try: bibunits = bibunitscan(self) except OSError: Logs.error('error bibunitscan') else: if bibunits: fn = ['bu' + str(i) for i in range(1, len(bibunits) + 1)] if fn: self.info('calling bibtex on bibunits') for f in fn: self.env.env = {'BIBINPUTS': self.texinputs(), 'BSTINPUTS': self.texinputs()} self.env.SRCFILE = f self.check_status('error when calling bibtex', self.bibtex_fun()) def makeindex(self): """ Searches the filesystem for *.idx* files to process.
If present, runs :py:meth:`waflib.Tools.tex.tex.makeindex_fun` """ self.idx_node = self.inputs[0].change_ext('.idx') try: idx_path = self.idx_node.abspath() os.stat(idx_path) except OSError: self.info('index file %s absent, not calling makeindex', idx_path) else: self.info('calling makeindex') self.env.SRCFILE = self.idx_node.name self.env.env = {} self.check_status('error when calling makeindex %s' % idx_path, self.makeindex_fun()) def bibtopic(self): """ Lists additional .aux files from the bibtopic package """ p = self.inputs[0].parent.get_bld() if os.path.exists(os.path.join(p.abspath(), 'btaux.aux')): self.aux_nodes += p.ant_glob('*[0-9].aux') def makeglossaries(self): """ Lists additional glossaries from .aux files. If present, runs the makeglossaries program. """ src_file = self.inputs[0].abspath() base_file = os.path.basename(src_file) base, _ = os.path.splitext(base_file) for aux_node in self.aux_nodes: try: ct = aux_node.read() except EnvironmentError: Logs.error('Error reading %s', aux_node.abspath()) continue if g_glossaries_re.findall(ct): if not self.env.MAKEGLOSSARIES: raise Errors.WafError("The program 'makeglossaries' is missing!") Logs.warn('calling makeglossaries') self.env.SRCFILE = base self.check_status('error when calling makeglossaries %s' % base, self.makeglossaries_fun()) return def texinputs(self): """ Returns the list of texinput nodes as a string suitable for the TEXINPUTS environment variable :rtype: string """ return os.pathsep.join([k.abspath() for k in self.texinputs_nodes]) + os.pathsep def run(self): """ Runs the whole TeX build process. Multiple passes are required depending on the usage of cross-references, bibliographies, glossaries, indexes and additional contents. The appropriate TeX compiler is called until the *.aux* files stop changing.
""" env = self.env if not env.PROMPT_LATEX: env.append_value('LATEXFLAGS', '-interaction=batchmode') env.append_value('PDFLATEXFLAGS', '-interaction=batchmode') env.append_value('XELATEXFLAGS', '-interaction=batchmode') # important, set the cwd for everybody self.cwd = self.inputs[0].parent.get_bld() self.info('first pass on %s', self.__class__.__name__) # Hash .aux files before even calling the LaTeX compiler cur_hash = self.hash_aux_nodes() self.call_latex() # Find the .aux files again since bibtex processing can require it self.hash_aux_nodes() self.bibtopic() self.bibfile() self.bibunits() self.makeindex() self.makeglossaries() for i in range(10): # There is no need to call latex again if the .aux hash value has not changed prev_hash = cur_hash cur_hash = self.hash_aux_nodes() if not cur_hash: Logs.error('No aux.h to process') if cur_hash and cur_hash == prev_hash: break # run the command self.info('calling %s', self.__class__.__name__) self.call_latex() def hash_aux_nodes(self): """ Returns a hash of the .aux file contents :rtype: string or bytes """ try: self.aux_nodes except AttributeError: try: self.aux_nodes = self.scan_aux(self.inputs[0].change_ext('.aux')) except IOError: return None return Utils.h_list([Utils.h_file(x.abspath()) for x in self.aux_nodes]) def call_latex(self): """ Runs the TeX compiler once """ self.env.env = {} self.env.env.update(os.environ) self.env.env.update({'TEXINPUTS': self.texinputs()}) self.env.SRCFILE = self.inputs[0].abspath() self.check_status('error when calling latex', self.texfun()) class latex(tex): "Compiles LaTeX files" texfun, vars = Task.compile_fun('${LATEX} ${LATEXFLAGS} ${SRCFILE}', shell=False) class pdflatex(tex): "Compiles PdfLaTeX files" texfun, vars = Task.compile_fun('${PDFLATEX} ${PDFLATEXFLAGS} ${SRCFILE}', shell=False) class xelatex(tex): "XeLaTeX files" texfun, vars = Task.compile_fun('${XELATEX} ${XELATEXFLAGS} ${SRCFILE}', shell=False) class dvips(Task.Task): "Converts dvi files to postscript" run_str = '${DVIPS} ${DVIPSFLAGS} ${SRC} -o ${TGT}' color = 'BLUE' after = ['latex', 'pdflatex', 'xelatex'] class dvipdf(Task.Task): "Converts dvi files to pdf" run_str = '${DVIPDF} ${DVIPDFFLAGS} ${SRC} ${TGT}' color = 'BLUE' after = ['latex', 'pdflatex', 'xelatex'] class pdf2ps(Task.Task): "Converts pdf files to postscript" run_str = '${PDF2PS} ${PDF2PSFLAGS} ${SRC} ${TGT}' color = 'BLUE' after = ['latex', 'pdflatex', 'xelatex'] @feature('tex') @before_method('process_source') def apply_tex(self): """ Creates :py:class:`waflib.Tools.tex.tex` objects, and dvips/dvipdf/pdf2ps tasks if necessary (outs='ps', etc). 
""" if not getattr(self, 'type', None) in ('latex', 'pdflatex', 'xelatex'): self.type = 'pdflatex' outs = Utils.to_list(getattr(self, 'outs', [])) # prompt for incomplete files (else the batchmode is used) try: self.generator.bld.conf except AttributeError: default_prompt = False else: default_prompt = True self.env.PROMPT_LATEX = getattr(self, 'prompt', default_prompt) deps_lst = [] if getattr(self, 'deps', None): deps = self.to_list(self.deps) for dep in deps: if isinstance(dep, str): n = self.path.find_resource(dep) if not n: self.bld.fatal('Could not find %r for %r' % (dep, self)) if not n in deps_lst: deps_lst.append(n) elif isinstance(dep, Node.Node): deps_lst.append(dep) for node in self.to_nodes(self.source): if self.type == 'latex': task = self.create_task('latex', node, node.change_ext('.dvi')) elif self.type == 'pdflatex': task = self.create_task('pdflatex', node, node.change_ext('.pdf')) elif self.type == 'xelatex': task = self.create_task('xelatex', node, node.change_ext('.pdf')) task.env = self.env # add the manual dependencies if deps_lst: for n in deps_lst: if not n in task.dep_nodes: task.dep_nodes.append(n) # texinputs is a nasty beast if hasattr(self, 'texinputs_nodes'): task.texinputs_nodes = self.texinputs_nodes else: task.texinputs_nodes = [node.parent, node.parent.get_bld(), self.path, self.path.get_bld()] lst = os.environ.get('TEXINPUTS', '') if self.env.TEXINPUTS: lst += os.pathsep + self.env.TEXINPUTS if lst: lst = lst.split(os.pathsep) for x in lst: if x: if os.path.isabs(x): p = self.bld.root.find_node(x) if p: task.texinputs_nodes.append(p) else: Logs.error('Invalid TEXINPUTS folder %s', x) else: Logs.error('Cannot resolve relative paths in TEXINPUTS %s', x) if self.type == 'latex': if 'ps' in outs: tsk = self.create_task('dvips', task.outputs, node.change_ext('.ps')) tsk.env.env = dict(os.environ) if 'pdf' in outs: tsk = self.create_task('dvipdf', task.outputs, node.change_ext('.pdf')) tsk.env.env = dict(os.environ) elif self.type == 'pdflatex': if 'ps' in outs: self.create_task('pdf2ps', task.outputs, node.change_ext('.ps')) self.source = [] def configure(self): """ Find the programs tex, latex and others without raising errors. 
""" v = self.env for p in 'tex latex pdflatex xelatex bibtex dvips dvipdf ps2pdf makeindex pdf2ps makeglossaries'.split(): try: self.find_program(p, var=p.upper()) except self.errors.ConfigurationError: pass v.DVIPSFLAGS = '-Ppdf' tdb-1.4.2/third_party/waf/waflib/Tools/vala.py0000660000000000000000000002615513444661622021213 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Ali Sabil, 2007 # Radosław Szkodziński, 2010 """ At this point, vala is still unstable, so do not expect this tool to be too stable either (apis, etc) """ import re from waflib import Build, Context, Errors, Logs, Node, Options, Task, Utils from waflib.TaskGen import extension, taskgen_method from waflib.Configure import conf class valac(Task.Task): """ Compiles vala files """ #run_str = "${VALAC} ${VALAFLAGS}" # ideally #vars = ['VALAC_VERSION'] vars = ["VALAC", "VALAC_VERSION", "VALAFLAGS"] ext_out = ['.h'] def run(self): cmd = self.env.VALAC + self.env.VALAFLAGS resources = getattr(self, 'vala_exclude', []) cmd.extend([a.abspath() for a in self.inputs if a not in resources]) ret = self.exec_command(cmd, cwd=self.vala_dir_node.abspath()) if ret: return ret if self.generator.dump_deps_node: self.generator.dump_deps_node.write('\n'.join(self.generator.packages)) return ret @taskgen_method def init_vala_task(self): """ Initializes the vala task with the relevant data (acts as a constructor) """ self.profile = getattr(self, 'profile', 'gobject') self.packages = packages = Utils.to_list(getattr(self, 'packages', [])) self.use = Utils.to_list(getattr(self, 'use', [])) if packages and not self.use: self.use = packages[:] # copy if self.profile == 'gobject': if not 'GOBJECT' in self.use: self.use.append('GOBJECT') def addflags(flags): self.env.append_value('VALAFLAGS', flags) if self.profile: addflags('--profile=%s' % self.profile) valatask = self.valatask # output directory if hasattr(self, 'vala_dir'): if isinstance(self.vala_dir, str): valatask.vala_dir_node = self.path.get_bld().make_node(self.vala_dir) try: valatask.vala_dir_node.mkdir() except OSError: raise self.bld.fatal('Cannot create the vala dir %r' % valatask.vala_dir_node) else: valatask.vala_dir_node = self.vala_dir else: valatask.vala_dir_node = self.path.get_bld() addflags('--directory=%s' % valatask.vala_dir_node.abspath()) if hasattr(self, 'thread'): if self.profile == 'gobject': if not 'GTHREAD' in self.use: self.use.append('GTHREAD') else: #Vala doesn't have threading support for dova nor posix Logs.warn('Profile %s means no threading support', self.profile) self.thread = False if self.thread: addflags('--thread') self.is_lib = 'cprogram' not in self.features if self.is_lib: addflags('--library=%s' % self.target) h_node = valatask.vala_dir_node.find_or_declare('%s.h' % self.target) valatask.outputs.append(h_node) addflags('--header=%s' % h_node.name) valatask.outputs.append(valatask.vala_dir_node.find_or_declare('%s.vapi' % self.target)) if getattr(self, 'gir', None): gir_node = valatask.vala_dir_node.find_or_declare('%s.gir' % self.gir) addflags('--gir=%s' % gir_node.name) valatask.outputs.append(gir_node) self.vala_target_glib = getattr(self, 'vala_target_glib', getattr(Options.options, 'vala_target_glib', None)) if self.vala_target_glib: addflags('--target-glib=%s' % self.vala_target_glib) addflags(['--define=%s' % x for x in Utils.to_list(getattr(self, 'vala_defines', []))]) packages_private = Utils.to_list(getattr(self, 'packages_private', [])) addflags(['--pkg=%s' % x for x in packages_private]) def _get_api_version(): 
api_version = '1.0' if hasattr(Context.g_module, 'API_VERSION'): version = Context.g_module.API_VERSION.split(".") if version[0] == "0": api_version = "0." + version[1] else: api_version = version[0] + ".0" return api_version self.includes = Utils.to_list(getattr(self, 'includes', [])) valatask.install_path = getattr(self, 'install_path', '') valatask.vapi_path = getattr(self, 'vapi_path', '${DATAROOTDIR}/vala/vapi') valatask.pkg_name = getattr(self, 'pkg_name', self.env.PACKAGE) valatask.header_path = getattr(self, 'header_path', '${INCLUDEDIR}/%s-%s' % (valatask.pkg_name, _get_api_version())) valatask.install_binding = getattr(self, 'install_binding', True) self.vapi_dirs = vapi_dirs = Utils.to_list(getattr(self, 'vapi_dirs', [])) #includes = [] if hasattr(self, 'use'): local_packages = Utils.to_list(self.use)[:] # make sure to have a copy seen = [] while len(local_packages) > 0: package = local_packages.pop() if package in seen: continue seen.append(package) # check if the package exists try: package_obj = self.bld.get_tgen_by_name(package) except Errors.WafError: continue # in practice the other task is already processed # but this makes it explicit package_obj.post() package_name = package_obj.target task = getattr(package_obj, 'valatask', None) if task: for output in task.outputs: if output.name == package_name + ".vapi": valatask.set_run_after(task) if package_name not in packages: packages.append(package_name) if output.parent not in vapi_dirs: vapi_dirs.append(output.parent) if output.parent not in self.includes: self.includes.append(output.parent) if hasattr(package_obj, 'use'): lst = self.to_list(package_obj.use) lst.reverse() local_packages = [pkg for pkg in lst if pkg not in seen] + local_packages addflags(['--pkg=%s' % p for p in packages]) for vapi_dir in vapi_dirs: if isinstance(vapi_dir, Node.Node): v_node = vapi_dir else: v_node = self.path.find_dir(vapi_dir) if not v_node: Logs.warn('Unable to locate Vala API directory: %r', vapi_dir) else: addflags('--vapidir=%s' % v_node.abspath()) self.dump_deps_node = None if self.is_lib and self.packages: self.dump_deps_node = valatask.vala_dir_node.find_or_declare('%s.deps' % self.target) valatask.outputs.append(self.dump_deps_node) if self.is_lib and valatask.install_binding: headers_list = [o for o in valatask.outputs if o.suffix() == ".h"] if headers_list: self.install_vheader = self.add_install_files(install_to=valatask.header_path, install_from=headers_list) vapi_list = [o for o in valatask.outputs if (o.suffix() in (".vapi", ".deps"))] if vapi_list: self.install_vapi = self.add_install_files(install_to=valatask.vapi_path, install_from=vapi_list) gir_list = [o for o in valatask.outputs if o.suffix() == '.gir'] if gir_list: self.install_gir = self.add_install_files( install_to=getattr(self, 'gir_path', '${DATAROOTDIR}/gir-1.0'), install_from=gir_list) if hasattr(self, 'vala_resources'): nodes = self.to_nodes(self.vala_resources) valatask.vala_exclude = getattr(valatask, 'vala_exclude', []) + nodes valatask.inputs.extend(nodes) for x in nodes: addflags(['--gresources', x.abspath()]) @extension('.vala', '.gs') def vala_file(self, node): """ Compile a vala file and bind the task to *self.valatask*. If an existing vala task is already set, add the node to its inputs. 
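When a target lists another vala target in ``use``, the dependency scan in ``init_vala_task`` above adds the producer's ``--pkg`` and ``--vapidir`` flags automatically; a hedged sketch::

	def build(bld):
		bld.shlib(source='foo.vala', target='foo', packages='glib-2.0')
		bld.program(source='app.vala', target='app', use='foo')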
The typical example is:: def build(bld): bld.program( packages = 'gtk+-2.0', target = 'vala-gtk-example', use = 'GTK GLIB', source = 'vala-gtk-example.vala foo.vala', vala_defines = ['DEBUG'] # adds --define= values to the command-line # the following arguments are for libraries #gir = 'hello-1.0', #gir_path = '/tmp', #vapi_path = '/tmp', #pkg_name = 'hello' # disable installing of gir, vapi and header #install_binding = False # profile = 'xyz' # adds --profile= to enable profiling # thread = True, # adds --thread (only honored with the 'gobject' profile) # vala_target_glib = 'xyz' # adds --target-glib=, can be given through the command-line option --vala-target-glib= ) :param node: vala file :type node: :py:class:`waflib.Node.Node` """ try: valatask = self.valatask except AttributeError: valatask = self.valatask = self.create_task('valac') self.init_vala_task() valatask.inputs.append(node) name = node.name[:node.name.rfind('.')] + '.c' c_node = valatask.vala_dir_node.find_or_declare(name) valatask.outputs.append(c_node) self.source.append(c_node) @extension('.vapi') def vapi_file(self, node): try: valatask = self.valatask except AttributeError: valatask = self.valatask = self.create_task('valac') self.init_vala_task() valatask.inputs.append(node) @conf def find_valac(self, valac_name, min_version): """ Find the valac program, and execute it to store the version number in *conf.env.VALAC_VERSION* :param valac_name: program name :type valac_name: string or list of string :param min_version: minimum version acceptable :type min_version: tuple of int """ valac = self.find_program(valac_name, var='VALAC') try: output = self.cmd_and_log(valac + ['--version']) except Errors.WafError: valac_version = None else: ver = re.search(r'\d+\.\d+\.\d+', output).group().split('.') valac_version = tuple([int(x) for x in ver]) self.msg('Checking for %s version >= %r' % (valac_name, min_version), valac_version, valac_version and valac_version >= min_version) if valac and valac_version < min_version: self.fatal("%s version %r is too old, need >= %r" % (valac_name, valac_version, min_version)) self.env.VALAC_VERSION = valac_version return valac @conf def check_vala(self, min_version=(0,8,0), branch=None): """ Check if a vala compiler from a given branch exists and is at least a given version. :param min_version: minimum version acceptable (0.8.0) :type min_version: tuple :param branch: first part of the version number, in case a snapshot is used (0, 8) :type branch: tuple of int """ if self.env.VALA_MINVER: min_version = self.env.VALA_MINVER if self.env.VALA_MINVER_BRANCH: branch = self.env.VALA_MINVER_BRANCH if not branch: branch = min_version[:2] try: find_valac(self, 'valac-%d.%d' % (branch[0], branch[1]), min_version) except self.errors.ConfigurationError: find_valac(self, 'valac', min_version) @conf def check_vala_deps(self): """ Load the gobject and gthread packages if they are missing.
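If ``--vala-target-glib=MAJOR.MINOR`` was passed on the command line, the two pkg-config checks below also enforce that minimum glib version (the option is declared in ``options`` at the end of this file).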
""" if not self.env.HAVE_GOBJECT: pkg_args = {'package': 'gobject-2.0', 'uselib_store': 'GOBJECT', 'args': '--cflags --libs'} if getattr(Options.options, 'vala_target_glib', None): pkg_args['atleast_version'] = Options.options.vala_target_glib self.check_cfg(**pkg_args) if not self.env.HAVE_GTHREAD: pkg_args = {'package': 'gthread-2.0', 'uselib_store': 'GTHREAD', 'args': '--cflags --libs'} if getattr(Options.options, 'vala_target_glib', None): pkg_args['atleast_version'] = Options.options.vala_target_glib self.check_cfg(**pkg_args) def configure(self): """ Use the following to enforce minimum vala version:: def configure(conf): conf.env.VALA_MINVER = (0, 10, 0) conf.load('vala') """ self.load('gnu_dirs') self.check_vala_deps() self.check_vala() self.add_os_flags('VALAFLAGS') self.env.append_unique('VALAFLAGS', ['-C']) def options(opt): """ Load the :py:mod:`waflib.Tools.gnu_dirs` tool and add the ``--vala-target-glib`` command-line option """ opt.load('gnu_dirs') valaopts = opt.add_option_group('Vala Compiler Options') valaopts.add_option('--vala-target-glib', default=None, dest='vala_target_glib', metavar='MAJOR.MINOR', help='Target version of glib for Vala GObject code generation') tdb-1.4.2/third_party/waf/waflib/Tools/waf_unit_test.py0000660000000000000000000002240013527011455023123 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Carlos Rafael Giani, 2006 # Thomas Nagy, 2010-2018 (ita) """ Unit testing system for C/C++/D and interpreted languages providing test execution: * in parallel, by using ``waf -j`` * partial (only the tests that have changed) or full (by using ``waf --alltests``) The tests are declared by adding the **test** feature to programs:: def options(opt): opt.load('compiler_cxx waf_unit_test') def configure(conf): conf.load('compiler_cxx waf_unit_test') def build(bld): bld(features='cxx cxxprogram test', source='main.cpp', target='app') # or bld.program(features='test', source='main2.cpp', target='app2') When the build is executed, the program 'test' will be built and executed without arguments. The success/failure is detected by looking at the return code. The status and the standard output/error are stored on the build context. The results can be displayed by registering a callback function. Here is how to call the predefined callback:: def build(bld): bld(features='cxx cxxprogram test', source='main.c', target='app') from waflib.Tools import waf_unit_test bld.add_post_fun(waf_unit_test.summary) By passing --dump-test-scripts the build outputs corresponding python files (with extension _run.py) that are useful for debugging purposes. """ import os, shlex, sys from waflib.TaskGen import feature, after_method, taskgen_method from waflib import Utils, Task, Logs, Options from waflib.Tools import ccroot testlock = Utils.threading.Lock() SCRIPT_TEMPLATE = """#! %(python)s import subprocess, sys cmd = %(cmd)r # if you want to debug with gdb: #cmd = ['gdb', '-args'] + cmd env = %(env)r status = subprocess.call(cmd, env=env, cwd=%(cwd)r, shell=isinstance(cmd, str)) sys.exit(status) """ @taskgen_method def handle_ut_cwd(self, key): """ Task generator method, used internally to limit code duplication. This method may disappear anytime. 
""" cwd = getattr(self, key, None) if cwd: if isinstance(cwd, str): # we want a Node instance if os.path.isabs(cwd): self.ut_cwd = self.bld.root.make_node(cwd) else: self.ut_cwd = self.path.make_node(cwd) @feature('test_scripts') def make_interpreted_test(self): """Create interpreted unit tests.""" for x in ['test_scripts_source', 'test_scripts_template']: if not hasattr(self, x): Logs.warn('a test_scripts taskgen i missing %s' % x) return self.ut_run, lst = Task.compile_fun(self.test_scripts_template, shell=getattr(self, 'test_scripts_shell', False)) script_nodes = self.to_nodes(self.test_scripts_source) for script_node in script_nodes: tsk = self.create_task('utest', [script_node]) tsk.vars = lst + tsk.vars tsk.env['SCRIPT'] = script_node.path_from(tsk.get_cwd()) self.handle_ut_cwd('test_scripts_cwd') env = getattr(self, 'test_scripts_env', None) if env: self.ut_env = env else: self.ut_env = dict(os.environ) paths = getattr(self, 'test_scripts_paths', {}) for (k,v) in paths.items(): p = self.ut_env.get(k, '').split(os.pathsep) if isinstance(v, str): v = v.split(os.pathsep) self.ut_env[k] = os.pathsep.join(p + v) @feature('test') @after_method('apply_link', 'process_use') def make_test(self): """Create the unit test task. There can be only one unit test task by task generator.""" if not getattr(self, 'link_task', None): return tsk = self.create_task('utest', self.link_task.outputs) if getattr(self, 'ut_str', None): self.ut_run, lst = Task.compile_fun(self.ut_str, shell=getattr(self, 'ut_shell', False)) tsk.vars = lst + tsk.vars self.handle_ut_cwd('ut_cwd') if not hasattr(self, 'ut_paths'): paths = [] for x in self.tmp_use_sorted: try: y = self.bld.get_tgen_by_name(x).link_task except AttributeError: pass else: if not isinstance(y, ccroot.stlink_task): paths.append(y.outputs[0].parent.abspath()) self.ut_paths = os.pathsep.join(paths) + os.pathsep if not hasattr(self, 'ut_env'): self.ut_env = dct = dict(os.environ) def add_path(var): dct[var] = self.ut_paths + dct.get(var,'') if Utils.is_win32: add_path('PATH') elif Utils.unversioned_sys_platform() == 'darwin': add_path('DYLD_LIBRARY_PATH') add_path('LD_LIBRARY_PATH') else: add_path('LD_LIBRARY_PATH') if not hasattr(self, 'ut_cmd'): self.ut_cmd = getattr(Options.options, 'testcmd', False) @taskgen_method def add_test_results(self, tup): """Override and return tup[1] to interrupt the build immediately if a test does not run""" Logs.debug("ut: %r", tup) try: self.utest_results.append(tup) except AttributeError: self.utest_results = [tup] try: self.bld.utest_results.append(tup) except AttributeError: self.bld.utest_results = [tup] @Task.deep_inputs class utest(Task.Task): """ Execute a unit test """ color = 'PINK' after = ['vnum', 'inst'] vars = [] def runnable_status(self): """ Always execute the task if `waf --alltests` was used or no tests if ``waf --notests`` was used """ if getattr(Options.options, 'no_tests', False): return Task.SKIP_ME ret = super(utest, self).runnable_status() if ret == Task.SKIP_ME: if getattr(Options.options, 'all_tests', False): return Task.RUN_ME return ret def get_test_env(self): """ In general, tests may require any library built anywhere in the project. Override this method if fewer paths are needed """ return self.generator.ut_env def post_run(self): super(utest, self).post_run() if getattr(Options.options, 'clear_failed_tests', False) and self.waf_unit_test_results[1]: self.generator.bld.task_sigs[self.uid()] = None def run(self): """ Execute the test. 
The execution is always successful, and the results are stored on ``self.generator.bld.utest_results`` for postprocessing. Override ``add_test_results`` to interrupt the build """ if hasattr(self.generator, 'ut_run'): return self.generator.ut_run(self) self.ut_exec = getattr(self.generator, 'ut_exec', [self.inputs[0].abspath()]) ut_cmd = getattr(self.generator, 'ut_cmd', False) if ut_cmd: self.ut_exec = shlex.split(ut_cmd % ' '.join(self.ut_exec)) return self.exec_command(self.ut_exec) def exec_command(self, cmd, **kw): self.generator.bld.log_command(cmd, kw) if getattr(Options.options, 'dump_test_scripts', False): script_code = SCRIPT_TEMPLATE % { 'python': sys.executable, 'env': self.get_test_env(), 'cwd': self.get_cwd().abspath(), 'cmd': cmd } script_file = self.inputs[0].abspath() + '_run.py' Utils.writef(script_file, script_code, encoding='utf-8') os.chmod(script_file, Utils.O755) if Logs.verbose > 1: Logs.info('Test debug file written as %r' % script_file) proc = Utils.subprocess.Popen(cmd, cwd=self.get_cwd().abspath(), env=self.get_test_env(), stderr=Utils.subprocess.PIPE, stdout=Utils.subprocess.PIPE, shell=isinstance(cmd,str)) (stdout, stderr) = proc.communicate() self.waf_unit_test_results = tup = (self.inputs[0].abspath(), proc.returncode, stdout, stderr) testlock.acquire() try: return self.generator.add_test_results(tup) finally: testlock.release() def get_cwd(self): return getattr(self.generator, 'ut_cwd', self.inputs[0].parent) def summary(bld): """ Display an execution summary:: def build(bld): bld(features='cxx cxxprogram test', source='main.c', target='app') from waflib.Tools import waf_unit_test bld.add_post_fun(waf_unit_test.summary) """ lst = getattr(bld, 'utest_results', []) if lst: Logs.pprint('CYAN', 'execution summary') total = len(lst) tfail = len([x for x in lst if x[1]]) Logs.pprint('GREEN', ' tests that pass %d/%d' % (total-tfail, total)) for (f, code, out, err) in lst: if not code: Logs.pprint('GREEN', ' %s' % f) Logs.pprint('GREEN' if tfail == 0 else 'RED', ' tests that fail %d/%d' % (tfail, total)) for (f, code, out, err) in lst: if code: Logs.pprint('RED', ' %s' % f) def set_exit_code(bld): """ If any of the tests fail, waf will exit with that exit code. This is useful if you have an automated build system which needs to report on errors from the tests. You may use it like this:: def build(bld): bld(features='cxx cxxprogram test', source='main.c', target='app') from waflib.Tools import waf_unit_test bld.add_post_fun(waf_unit_test.set_exit_code) """ lst = getattr(bld, 'utest_results', []) for (f, code, out, err) in lst: if code: msg = [] if out: msg.append('stdout:%s%s' % (os.linesep, out.decode('utf-8'))) if err: msg.append('stderr:%s%s' % (os.linesep, err.decode('utf-8'))) bld.fatal(os.linesep.join(msg)) def options(opt): """ Provide the ``--alltests``, ``--notests`` and ``--testcmd`` command-line options.
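The same options drive interpreted tests; a ``test_scripts`` sketch (``${PYTHON}`` is an assumption here, any command template may be used)::

	def build(bld):
		bld(features='test_scripts',
			test_scripts_source='tests/basic.py',
			test_scripts_template='${PYTHON} ${SCRIPT}')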
""" opt.add_option('--notests', action='store_true', default=False, help='Exec no unit tests', dest='no_tests') opt.add_option('--alltests', action='store_true', default=False, help='Exec all unit tests', dest='all_tests') opt.add_option('--clear-failed', action='store_true', default=False, help='Force failed unit tests to run again next time', dest='clear_failed_tests') opt.add_option('--testcmd', action='store', default=False, dest='testcmd', help='Run the unit tests using the test-cmd string example "--testcmd="valgrind --error-exitcode=1 %s" to run under valgrind') opt.add_option('--dump-test-scripts', action='store_true', default=False, help='Create python scripts to help debug tests', dest='dump_test_scripts') tdb-1.4.2/third_party/waf/waflib/Tools/winres.py0000660000000000000000000000414113527011455021561 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Brant Young, 2007 "Process *.rc* files for C/C++: X{.rc -> [.res|.rc.o]}" import re from waflib import Task from waflib.TaskGen import extension from waflib.Tools import c_preproc @extension('.rc') def rc_file(self, node): """ Binds the .rc extension to a winrc task """ obj_ext = '.rc.o' if self.env.WINRC_TGT_F == '/fo': obj_ext = '.res' rctask = self.create_task('winrc', node, node.change_ext(obj_ext)) try: self.compiled_tasks.append(rctask) except AttributeError: self.compiled_tasks = [rctask] re_lines = re.compile( r'(?:^[ \t]*(#|%:)[ \t]*(ifdef|ifndef|if|else|elif|endif|include|import|define|undef|pragma)[ \t]*(.*?)\s*$)|'\ r'(?:^\w+[ \t]*(ICON|BITMAP|CURSOR|HTML|FONT|MESSAGETABLE|TYPELIB|REGISTRY|D3DFX)[ \t]*(.*?)\s*$)', re.IGNORECASE | re.MULTILINE) class rc_parser(c_preproc.c_parser): """ Calculates dependencies in .rc files """ def filter_comments(self, node): """ Overrides :py:meth:`waflib.Tools.c_preproc.c_parser.filter_comments` """ code = node.read() if c_preproc.use_trigraphs: for (a, b) in c_preproc.trig_def: code = code.split(a).join(b) code = c_preproc.re_nl.sub('', code) code = c_preproc.re_cpp.sub(c_preproc.repl, code) ret = [] for m in re.finditer(re_lines, code): if m.group(2): ret.append((m.group(2), m.group(3))) else: ret.append(('include', m.group(5))) return ret class winrc(Task.Task): """ Compiles resource files """ run_str = '${WINRC} ${WINRCFLAGS} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${WINRC_TGT_F} ${TGT} ${WINRC_SRC_F} ${SRC}' color = 'BLUE' def scan(self): tmp = rc_parser(self.generator.includes_nodes) tmp.start(self.inputs[0], self.env) return (tmp.nodes, tmp.names) def configure(conf): """ Detects the programs RC or windres, depending on the C/C++ compiler in use """ v = conf.env if not v.WINRC: if v.CC_NAME == 'msvc': conf.find_program('RC', var='WINRC', path_list=v.PATH) v.WINRC_TGT_F = '/fo' v.WINRC_SRC_F = '' else: conf.find_program('windres', var='WINRC', path_list=v.PATH) v.WINRC_TGT_F = '-o' v.WINRC_SRC_F = '-i' tdb-1.4.2/third_party/waf/waflib/Tools/xlc.py0000660000000000000000000000264113444661622021050 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) # Ralf Habacker, 2006 (rh) # Yinon Ehrlich, 2009 # Michael Kuhn, 2009 from waflib.Tools import ccroot, ar from waflib.Configure import conf @conf def find_xlc(conf): """ Detects the Aix C compiler """ cc = conf.find_program(['xlc_r', 'xlc'], var='CC') conf.get_xlc_version(cc) conf.env.CC_NAME = 'xlc' @conf def xlc_common_flags(conf): """ Flags required for executing the Aix C compiler """ v = conf.env v.CC_SRC_F = [] v.CC_TGT_F = ['-c', '-o'] if not v.LINK_CC: 
v.LINK_CC = v.CC v.CCLNK_SRC_F = [] v.CCLNK_TGT_F = ['-o'] v.CPPPATH_ST = '-I%s' v.DEFINES_ST = '-D%s' v.LIB_ST = '-l%s' # template for adding libs v.LIBPATH_ST = '-L%s' # template for adding libpaths v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-L%s' v.RPATH_ST = '-Wl,-rpath,%s' v.SONAME_ST = [] v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.LINKFLAGS_cprogram = ['-Wl,-brtl'] v.cprogram_PATTERN = '%s' v.CFLAGS_cshlib = ['-fPIC'] v.LINKFLAGS_cshlib = ['-G', '-Wl,-brtl,-bexpfull'] v.cshlib_PATTERN = 'lib%s.so' v.LINKFLAGS_cstlib = [] v.cstlib_PATTERN = 'lib%s.a' def configure(conf): conf.find_xlc() conf.find_ar() conf.xlc_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() tdb-1.4.2/third_party/waf/waflib/Tools/xlcxx.py0000660000000000000000000000267413444661622021436 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2018 (ita) # Ralf Habacker, 2006 (rh) # Yinon Ehrlich, 2009 # Michael Kuhn, 2009 from waflib.Tools import ccroot, ar from waflib.Configure import conf @conf def find_xlcxx(conf): """ Detects the Aix C++ compiler """ cxx = conf.find_program(['xlc++_r', 'xlc++'], var='CXX') conf.get_xlc_version(cxx) conf.env.CXX_NAME = 'xlc++' @conf def xlcxx_common_flags(conf): """ Flags required for executing the Aix C++ compiler """ v = conf.env v.CXX_SRC_F = [] v.CXX_TGT_F = ['-c', '-o'] if not v.LINK_CXX: v.LINK_CXX = v.CXX v.CXXLNK_SRC_F = [] v.CXXLNK_TGT_F = ['-o'] v.CPPPATH_ST = '-I%s' v.DEFINES_ST = '-D%s' v.LIB_ST = '-l%s' # template for adding libs v.LIBPATH_ST = '-L%s' # template for adding libpaths v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-L%s' v.RPATH_ST = '-Wl,-rpath,%s' v.SONAME_ST = [] v.SHLIB_MARKER = [] v.STLIB_MARKER = [] v.LINKFLAGS_cxxprogram= ['-Wl,-brtl'] v.cxxprogram_PATTERN = '%s' v.CXXFLAGS_cxxshlib = ['-fPIC'] v.LINKFLAGS_cxxshlib = ['-G', '-Wl,-brtl,-bexpfull'] v.cxxshlib_PATTERN = 'lib%s.so' v.LINKFLAGS_cxxstlib = [] v.cxxstlib_PATTERN = 'lib%s.a' def configure(conf): conf.find_xlcxx() conf.find_ar() conf.xlcxx_common_flags() conf.cxx_load_tools() conf.cxx_add_flags() conf.link_add_flags() tdb-1.4.2/third_party/waf/waflib/Utils.py0000660000000000000000000006140113527011455020254 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) """ Utilities and platform-specific fixes The portability fixes try to provide a consistent behavior of the Waf API through Python versions 2.5 to 3.X and across different platforms (win32, linux, etc) """ from __future__ import with_statement import atexit, os, sys, errno, inspect, re, datetime, platform, base64, signal, functools, time try: import cPickle except ImportError: import pickle as cPickle # leave this if os.name == 'posix' and sys.version_info[0] < 3: try: import subprocess32 as subprocess except ImportError: import subprocess else: import subprocess try: TimeoutExpired = subprocess.TimeoutExpired except AttributeError: class TimeoutExpired(Exception): pass from collections import deque, defaultdict try: import _winreg as winreg except ImportError: try: import winreg except ImportError: winreg = None from waflib import Errors try: from hashlib import md5 except ImportError: try: from hashlib import sha1 as md5 except ImportError: # never fail to enable potential fixes from another module pass else: try: md5().digest() except ValueError: # Fips? 
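# Editor's note (not original text): constructing md5() raises ValueError on
# FIPS-restricted hosts, hence the sha1 fallback taken on the next line.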
#2213 from hashlib import sha1 as md5 try: import threading except ImportError: if not 'JOBS' in os.environ: # no threading :-( os.environ['JOBS'] = '1' class threading(object): """ A fake threading class for platforms lacking the threading module. Use ``waf -j1`` on those platforms """ pass class Lock(object): """Fake Lock class""" def acquire(self): pass def release(self): pass threading.Lock = threading.Thread = Lock SIG_NIL = 'SIG_NIL_SIG_NIL_'.encode() """Arbitrary null value for hashes. Modify this value according to the hash function in use""" O644 = 420 """Constant representing the permissions for regular files (0644 raises a syntax error on python 3)""" O755 = 493 """Constant representing the permissions for executable files (0755 raises a syntax error on python 3)""" rot_chr = ['\\', '|', '/', '-'] "List of characters to use when displaying the throbber (progress bar)" rot_idx = 0 "Index of the current throbber character (progress bar)" class ordered_iter_dict(dict): """Ordered dictionary that provides iteration from the most recently inserted keys first""" def __init__(self, *k, **kw): self.lst = deque() dict.__init__(self, *k, **kw) def clear(self): dict.clear(self) self.lst = deque() def __setitem__(self, key, value): if key in dict.keys(self): self.lst.remove(key) dict.__setitem__(self, key, value) self.lst.append(key) def __delitem__(self, key): dict.__delitem__(self, key) try: self.lst.remove(key) except ValueError: pass def __iter__(self): return reversed(self.lst) def keys(self): return reversed(self.lst) class lru_node(object): """ Used by :py:class:`waflib.Utils.lru_cache` """ __slots__ = ('next', 'prev', 'key', 'val') def __init__(self): self.next = self self.prev = self self.key = None self.val = None class lru_cache(object): """ A simple least-recently used cache with lazy allocation """ __slots__ = ('maxlen', 'table', 'head') def __init__(self, maxlen=100): self.maxlen = maxlen """ Maximum amount of elements in the cache """ self.table = {} """ Mapping key-value """ self.head = lru_node() self.head.next = self.head self.head.prev = self.head def __getitem__(self, key): node = self.table[key] # assert(key==node.key) if node is self.head: return node.val # detach the node found node.prev.next = node.next node.next.prev = node.prev # replace the head node.next = self.head.next node.prev = self.head self.head = node.next.prev = node.prev.next = node return node.val def __setitem__(self, key, val): if key in self.table: # update the value for an existing key node = self.table[key] node.val = val self.__getitem__(key) else: if len(self.table) < self.maxlen: # the very first item is unused until the maximum is reached node = lru_node() node.prev = self.head node.next = self.head.next node.prev.next = node.next.prev = node else: node = self.head = self.head.next try: # that's another key del self.table[node.key] except KeyError: pass node.key = key node.val = val self.table[key] = node class lazy_generator(object): def __init__(self, fun, params): self.fun = fun self.params = params def __iter__(self): return self def __next__(self): try: it = self.it except AttributeError: it = self.it = self.fun(*self.params) return next(it) next = __next__ is_win32 = os.sep == '\\' or sys.platform == 'win32' or os.name == 'nt' # msys2 """ Whether this system is a Windows series """ def readf(fname, m='r', encoding='latin-1'): """ Reads an entire file into a string. 
See also :py:meth:`waflib.Node.Node.readf`:: def build(ctx): from waflib import Utils txt = Utils.readf(self.path.find_node('wscript').abspath()) txt = ctx.path.find_node('wscript').read() :type fname: string :param fname: Path to file :type m: string :param m: Open mode :type encoding: string :param encoding: encoding value, only used for python 3 :rtype: string :return: Content of the file """ if sys.hexversion > 0x3000000 and not 'b' in m: m += 'b' with open(fname, m) as f: txt = f.read() if encoding: txt = txt.decode(encoding) else: txt = txt.decode() else: with open(fname, m) as f: txt = f.read() return txt def writef(fname, data, m='w', encoding='latin-1'): """ Writes an entire file from a string. See also :py:meth:`waflib.Node.Node.writef`:: def build(ctx): from waflib import Utils txt = Utils.writef(self.path.make_node('i_like_kittens').abspath(), 'some data') self.path.make_node('i_like_kittens').write('some data') :type fname: string :param fname: Path to file :type data: string :param data: The contents to write to the file :type m: string :param m: Open mode :type encoding: string :param encoding: encoding value, only used for python 3 """ if sys.hexversion > 0x3000000 and not 'b' in m: data = data.encode(encoding) m += 'b' with open(fname, m) as f: f.write(data) def h_file(fname): """ Computes a hash value for a file by using md5. Use the md5_tstamp extension to get faster build hashes if necessary. :type fname: string :param fname: path to the file to hash :return: hash of the file contents :rtype: string or bytes """ m = md5() with open(fname, 'rb') as f: while fname: fname = f.read(200000) m.update(fname) return m.digest() def readf_win32(f, m='r', encoding='latin-1'): flags = os.O_NOINHERIT | os.O_RDONLY if 'b' in m: flags |= os.O_BINARY if '+' in m: flags |= os.O_RDWR try: fd = os.open(f, flags) except OSError: raise IOError('Cannot read from %r' % f) if sys.hexversion > 0x3000000 and not 'b' in m: m += 'b' with os.fdopen(fd, m) as f: txt = f.read() if encoding: txt = txt.decode(encoding) else: txt = txt.decode() else: with os.fdopen(fd, m) as f: txt = f.read() return txt def writef_win32(f, data, m='w', encoding='latin-1'): if sys.hexversion > 0x3000000 and not 'b' in m: data = data.encode(encoding) m += 'b' flags = os.O_CREAT | os.O_TRUNC | os.O_WRONLY | os.O_NOINHERIT if 'b' in m: flags |= os.O_BINARY if '+' in m: flags |= os.O_RDWR try: fd = os.open(f, flags) except OSError: raise OSError('Cannot write to %r' % f) with os.fdopen(fd, m) as f: f.write(data) def h_file_win32(fname): try: fd = os.open(fname, os.O_BINARY | os.O_RDONLY | os.O_NOINHERIT) except OSError: raise OSError('Cannot read from %r' % fname) m = md5() with os.fdopen(fd, 'rb') as f: while fname: fname = f.read(200000) m.update(fname) return m.digest() # always save these readf_unix = readf writef_unix = writef h_file_unix = h_file if hasattr(os, 'O_NOINHERIT') and sys.hexversion < 0x3040000: # replace the default functions readf = readf_win32 writef = writef_win32 h_file = h_file_win32 try: x = ''.encode('hex') except LookupError: import binascii def to_hex(s): ret = binascii.hexlify(s) if not isinstance(ret, str): ret = ret.decode('utf-8') return ret else: def to_hex(s): return s.encode('hex') to_hex.__doc__ = """ Return the hexadecimal representation of a string :param s: string to convert :type s: string """ def listdir_win32(s): """ Lists the contents of a folder in a portable manner. On Win32, returns the list of drive letters: ['C:', 'X:', 'Z:'] when an empty string is given. 
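An editor's illustration (drive letters vary by machine)::

	from waflib import Utils
	Utils.listdir('')      # on win32: the list of drives, e.g. ['C:\\', 'D:\\']
	Utils.listdir('C:')    # 'C:' is completed to 'C:\\' before listing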
:type s: string :param s: a string, which can be empty on Windows """ if not s: try: import ctypes except ImportError: # there is nothing much we can do return [x + ':\\' for x in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'] else: dlen = 4 # length of "?:\\x00" maxdrives = 26 buf = ctypes.create_string_buffer(maxdrives * dlen) ndrives = ctypes.windll.kernel32.GetLogicalDriveStringsA(maxdrives*dlen, ctypes.byref(buf)) return [ str(buf.raw[4*i:4*i+2].decode('ascii')) for i in range(int(ndrives/dlen)) ] if len(s) == 2 and s[1] == ":": s += os.sep if not os.path.isdir(s): e = OSError('%s is not a directory' % s) e.errno = errno.ENOENT raise e return os.listdir(s) listdir = os.listdir if is_win32: listdir = listdir_win32 def num2ver(ver): """ Converts a string, tuple or version number into an integer. The number is supposed to have at most 4 components:: from waflib.Utils import num2ver num2ver('1.3.2') == num2ver((1,3,2)) == num2ver((1,3,2,0)) :type ver: string or tuple of numbers :param ver: a version number """ if isinstance(ver, str): ver = tuple(ver.split('.')) if isinstance(ver, tuple): ret = 0 for i in range(4): if i < len(ver): ret += 256**(3 - i) * int(ver[i]) return ret return ver def to_list(val): """ Converts a string argument to a list by splitting it by spaces. Returns the object if not a string:: from waflib.Utils import to_list lst = to_list('a b c d') :param val: list of string or space-separated string :rtype: list :return: Argument converted to list """ if isinstance(val, str): return val.split() else: return val def console_encoding(): try: import ctypes except ImportError: pass else: try: codepage = ctypes.windll.kernel32.GetConsoleCP() except AttributeError: pass else: if codepage: return 'cp%d' % codepage return sys.stdout.encoding or ('cp1252' if is_win32 else 'latin-1') def split_path_unix(path): return path.split('/') def split_path_cygwin(path): if path.startswith('//'): ret = path.split('/')[2:] ret[0] = '/' + ret[0] return ret return path.split('/') re_sp = re.compile('[/\\\\]+') def split_path_win32(path): if path.startswith('\\\\'): ret = re_sp.split(path)[1:] ret[0] = '\\\\' + ret[0] if ret[0] == '\\\\?': return ret[1:] return ret return re_sp.split(path) msysroot = None def split_path_msys(path): if path.startswith(('/', '\\')) and not path.startswith(('//', '\\\\')): # msys paths can be in the form /usr/bin global msysroot if not msysroot: # msys has python 2.7 or 3, so we can use this msysroot = subprocess.check_output(['cygpath', '-w', '/']).decode(sys.stdout.encoding or 'latin-1') msysroot = msysroot.strip() path = os.path.normpath(msysroot + os.sep + path) return split_path_win32(path) if sys.platform == 'cygwin': split_path = split_path_cygwin elif is_win32: # Consider this an MSYSTEM environment if $MSYSTEM is set and python # reports it is executable from a unix like path on a windows host. if os.environ.get('MSYSTEM') and sys.executable.startswith('/'): split_path = split_path_msys else: split_path = split_path_win32 else: split_path = split_path_unix split_path.__doc__ = """ Splits a path by / or \\; do not confuse this function with ``os.path.split`` :type path: string :param path: path to split :return: list of string """ def check_dir(path): """ Ensures that a directory exists (similar to ``mkdir -p``). :type path: string :param path: Path to directory :raises: :py:class:`waflib.Errors.WafError` if the folder cannot be added.
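An illustrative call (the path is invented)::

	from waflib import Utils
	Utils.check_dir('/tmp/project/build/objs')  # behaves like mkdir -p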
""" if not os.path.isdir(path): try: os.makedirs(path) except OSError as e: if not os.path.isdir(path): raise Errors.WafError('Cannot create the folder %r' % path, ex=e) def check_exe(name, env=None): """ Ensures that a program exists :type name: string :param name: path to the program :param env: configuration object :type env: :py:class:`waflib.ConfigSet.ConfigSet` :return: path of the program or None :raises: :py:class:`waflib.Errors.WafError` if the folder cannot be added. """ if not name: raise ValueError('Cannot execute an empty string!') def is_exe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) fpath, fname = os.path.split(name) if fpath and is_exe(name): return os.path.abspath(name) else: env = env or os.environ for path in env['PATH'].split(os.pathsep): path = path.strip('"') exe_file = os.path.join(path, name) if is_exe(exe_file): return os.path.abspath(exe_file) return None def def_attrs(cls, **kw): """ Sets default attributes on a class instance :type cls: class :param cls: the class to update the given attributes in. :type kw: dict :param kw: dictionary of attributes names and values. """ for k, v in kw.items(): if not hasattr(cls, k): setattr(cls, k, v) def quote_define_name(s): """ Converts a string into an identifier suitable for C defines. :type s: string :param s: String to convert :rtype: string :return: Identifier suitable for C defines """ fu = re.sub('[^a-zA-Z0-9]', '_', s) fu = re.sub('_+', '_', fu) fu = fu.upper() return fu re_sh = re.compile('\\s|\'|"') """ Regexp used for shell_escape below """ def shell_escape(cmd): """ Escapes a command: ['ls', '-l', 'arg space'] -> ls -l 'arg space' """ if isinstance(cmd, str): return cmd return ' '.join(repr(x) if re_sh.search(x) else x for x in cmd) def h_list(lst): """ Hashes lists of ordered data. Using hash(tup) for tuples would be much more efficient, but Python now enforces hash randomization :param lst: list to hash :type lst: list of strings :return: hash of the list """ return md5(repr(lst).encode()).digest() if sys.hexversion < 0x3000000: def h_list_python2(lst): return md5(repr(lst)).digest() h_list_python2.__doc__ = h_list.__doc__ h_list = h_list_python2 def h_fun(fun): """ Hash functions :param fun: function to hash :type fun: function :return: hash of the function :rtype: string or bytes """ try: return fun.code except AttributeError: if isinstance(fun, functools.partial): code = list(fun.args) # The method items() provides a sequence of tuples where the first element # represents an optional argument of the partial function application # # The sorting result outcome will be consistent because: # 1. tuples are compared in order of their elements # 2. 
optional argument names are unique code.extend(sorted(fun.keywords.items())) code.append(h_fun(fun.func)) fun.code = h_list(code) return fun.code try: h = inspect.getsource(fun) except EnvironmentError: h = 'nocode' try: fun.code = h except AttributeError: pass return h def h_cmd(ins): """ Hashes objects recursively :param ins: input object :type ins: string or list or tuple or function :rtype: string or bytes """ # this function is not meant to be particularly fast if isinstance(ins, str): # a command is either a string ret = ins elif isinstance(ins, list) or isinstance(ins, tuple): # or a list of functions/strings ret = str([h_cmd(x) for x in ins]) else: # or just a python function ret = str(h_fun(ins)) if sys.hexversion > 0x3000000: ret = ret.encode('latin-1', 'xmlcharrefreplace') return ret reg_subst = re.compile(r"(\\\\)|(\$\$)|\$\{([^}]+)\}") def subst_vars(expr, params): """ Replaces ${VAR} with the value of VAR taken from a dict or a config set:: from waflib import Utils s = Utils.subst_vars('${PREFIX}/bin', env) :type expr: string :param expr: String to perform substitution on :param params: Dictionary or config set to look up variable values. """ def repl_var(m): if m.group(1): return '\\' if m.group(2): return '$' try: # ConfigSet instances may contain lists return params.get_flat(m.group(3)) except AttributeError: return params[m.group(3)] # if you get a TypeError, it means that 'expr' is not a string... # Utils.subst_vars(None, env) will not work return reg_subst.sub(repl_var, expr) def destos_to_binfmt(key): """ Returns the binary format based on the unversioned platform name, and defaults to ``elf`` if nothing is found. :param key: platform name :type key: string :return: string representing the binary format """ if key == 'darwin': return 'mac-o' elif key in ('win32', 'cygwin', 'uwin', 'msys'): return 'pe' return 'elf' def unversioned_sys_platform(): """ Returns the unversioned platform name. Some Python platform names contain versions that depend on the build environment, e.g. linux2, freebsd6, etc. This returns the name without the version number. Exceptions are os2 and win32, which are returned verbatim. :rtype: string :return: Unversioned platform name """ s = sys.platform if s.startswith('java'): # The real OS is hidden under the JVM. from java.lang import System s = System.getProperty('os.name') # see http://lopica.sourceforge.net/os.html for a list of possible values if s == 'Mac OS X': return 'darwin' elif s.startswith('Windows '): return 'win32' elif s == 'OS/2': return 'os2' elif s == 'HP-UX': return 'hp-ux' elif s in ('SunOS', 'Solaris'): return 'sunos' else: s = s.lower() # powerpc == darwin for our purposes if s == 'powerpc': return 'darwin' if s == 'win32' or s == 'os2': return s if s == 'cli' and os.name == 'nt': # ironpython is only on windows as far as we know return 'win32' return re.split(r'\d+$', s)[0] def nada(*k, **kw): """ Does nothing :return: None """ pass class Timer(object): """ Simple object for timing the execution of commands.
Its string representation is the duration:: from waflib.Utils import Timer timer = Timer() a_few_operations() s = str(timer) """ def __init__(self): self.start_time = self.now() def __str__(self): delta = self.now() - self.start_time if not isinstance(delta, datetime.timedelta): delta = datetime.timedelta(seconds=delta) days = delta.days hours, rem = divmod(delta.seconds, 3600) minutes, seconds = divmod(rem, 60) seconds += delta.microseconds * 1e-6 result = '' if days: result += '%dd' % days if days or hours: result += '%dh' % hours if days or hours or minutes: result += '%dm' % minutes return '%s%.3fs' % (result, seconds) def now(self): return datetime.datetime.utcnow() if hasattr(time, 'perf_counter'): def now(self): return time.perf_counter() def read_la_file(path): """ Reads property files, used by msvc.py :param path: file to read :type path: string """ sp = re.compile(r'^([^=]+)=\'(.*)\'$') dc = {} for line in readf(path).splitlines(): try: _, left, right, _ = sp.split(line.strip()) dc[left] = right except ValueError: pass return dc def run_once(fun): """ Decorator: let a function cache its results, use like this:: @run_once def foo(k): return 345*2343 .. note:: in practice this can cause memory leaks, prefer a :py:class:`waflib.Utils.lru_cache` :param fun: function to execute :type fun: function :return: the return value of the function executed """ cache = {} def wrap(*k): try: return cache[k] except KeyError: ret = fun(*k) cache[k] = ret return ret wrap.__cache__ = cache wrap.__name__ = fun.__name__ return wrap def get_registry_app_path(key, filename): """ Returns the value of a registry key for an executable :type key: string :type filename: list of string """ if not winreg: return None try: result = winreg.QueryValue(key, "Software\\Microsoft\\Windows\\CurrentVersion\\App Paths\\%s.exe" % filename[0]) except OSError: pass else: if os.path.isfile(result): return result def lib64(): """ Guess the default ``/usr/lib`` extension for 64-bit applications :return: '64' or '' :rtype: string """ # default settings for /usr/lib if os.sep == '/': if platform.architecture()[0] == '64bit': if os.path.exists('/usr/lib64') and not os.path.exists('/usr/lib32'): return '64' return '' def sane_path(p): # private function for the time being! return os.path.abspath(os.path.expanduser(p)) process_pool = [] """ List of processes started to execute sub-process commands """ def get_process(): """ Returns a process object that can execute commands as sub-processes :rtype: subprocess.Popen """ try: return process_pool.pop() except IndexError: filepath = os.path.dirname(os.path.abspath(__file__)) + os.sep + 'processor.py' cmd = [sys.executable, '-c', readf(filepath)] return subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, bufsize=0, close_fds=not is_win32) def run_prefork_process(cmd, kwargs, cargs): """ Delegates process execution to a pre-forked process instance. 
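Each request is pickled, base64-encoded and written to the child process one line at a time; objects that cannot be pickled cause a transparent fallback to :py:func:`waflib.Utils.run_regular_process` below.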
""" if not 'env' in kwargs: kwargs['env'] = dict(os.environ) try: obj = base64.b64encode(cPickle.dumps([cmd, kwargs, cargs])) except (TypeError, AttributeError): return run_regular_process(cmd, kwargs, cargs) proc = get_process() if not proc: return run_regular_process(cmd, kwargs, cargs) proc.stdin.write(obj) proc.stdin.write('\n'.encode()) proc.stdin.flush() obj = proc.stdout.readline() if not obj: raise OSError('Preforked sub-process %r died' % proc.pid) process_pool.append(proc) lst = cPickle.loads(base64.b64decode(obj)) # Jython wrapper failures (bash/execvp) assert len(lst) == 5 ret, out, err, ex, trace = lst if ex: if ex == 'OSError': raise OSError(trace) elif ex == 'ValueError': raise ValueError(trace) elif ex == 'TimeoutExpired': exc = TimeoutExpired(cmd, timeout=cargs['timeout'], output=out) exc.stderr = err raise exc else: raise Exception(trace) return ret, out, err def lchown(path, user=-1, group=-1): """ Change the owner/group of a path, raises an OSError if the ownership change fails. :param user: user to change :type user: int or str :param group: group to change :type group: int or str """ if isinstance(user, str): import pwd entry = pwd.getpwnam(user) if not entry: raise OSError('Unknown user %r' % user) user = entry[2] if isinstance(group, str): import grp entry = grp.getgrnam(group) if not entry: raise OSError('Unknown group %r' % group) group = entry[2] return os.lchown(path, user, group) def run_regular_process(cmd, kwargs, cargs={}): """ Executes a subprocess command by using subprocess.Popen """ proc = subprocess.Popen(cmd, **kwargs) if kwargs.get('stdout') or kwargs.get('stderr'): try: out, err = proc.communicate(**cargs) except TimeoutExpired: if kwargs.get('start_new_session') and hasattr(os, 'killpg'): os.killpg(proc.pid, signal.SIGKILL) else: proc.kill() out, err = proc.communicate() exc = TimeoutExpired(proc.args, timeout=cargs['timeout'], output=out) exc.stderr = err raise exc status = proc.returncode else: out, err = (None, None) try: status = proc.wait(**cargs) except TimeoutExpired as e: if kwargs.get('start_new_session') and hasattr(os, 'killpg'): os.killpg(proc.pid, signal.SIGKILL) else: proc.kill() proc.wait() raise e return status, out, err def run_process(cmd, kwargs, cargs={}): """ Executes a subprocess by using a pre-forked process when possible or falling back to subprocess.Popen. See :py:func:`waflib.Utils.run_prefork_process` and :py:func:`waflib.Utils.run_regular_process` """ if kwargs.get('stdout') and kwargs.get('stderr'): return run_prefork_process(cmd, kwargs, cargs) else: return run_regular_process(cmd, kwargs, cargs) def alloc_process_pool(n, force=False): """ Allocates an amount of processes to the default pool so its size is at least *n*. It is useful to call this function early so that the pre-forked processes use as little memory as possible. 
:param n: pool size :type n: integer :param force: if True then *n* more processes are added to the existing pool :type force: bool """ # mandatory on python2, unnecessary on python >= 3.2 global run_process, get_process, alloc_process_pool if not force: n = max(n - len(process_pool), 0) try: lst = [get_process() for x in range(n)] except OSError: run_process = run_regular_process get_process = alloc_process_pool = nada else: for x in lst: process_pool.append(x) def atexit_pool(): for k in process_pool: try: os.kill(k.pid, 9) except OSError: pass else: k.wait() # see #1889 if (sys.hexversion<0x207000f and not is_win32) or sys.hexversion>=0x306000f: atexit.register(atexit_pool) if os.environ.get('WAF_NO_PREFORK') or sys.platform == 'cli' or not sys.executable: run_process = run_regular_process get_process = alloc_process_pool = nada tdb-1.4.2/third_party/waf/waflib/__init__.py0000660000000000000000000000010713444661622020714 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2018 (ita) tdb-1.4.2/third_party/waf/waflib/ansiterm.py0000660000000000000000000002527313527011455021005 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 """ Emulate a vt100 terminal in cmd.exe By wrapping sys.stdout / sys.stderr with Ansiterm, the vt100 escape characters will be interpreted and the equivalent actions will be performed with Win32 console commands. """ import os, re, sys from waflib import Utils wlock = Utils.threading.Lock() try: from ctypes import Structure, windll, c_short, c_ushort, c_ulong, c_int, byref, c_wchar, POINTER, c_long except ImportError: class AnsiTerm(object): def __init__(self, stream): self.stream = stream try: self.errors = self.stream.errors except AttributeError: pass # python 2.5 self.encoding = self.stream.encoding def write(self, txt): try: wlock.acquire() self.stream.write(txt) self.stream.flush() finally: wlock.release() def fileno(self): return self.stream.fileno() def flush(self): self.stream.flush() def isatty(self): return self.stream.isatty() else: class COORD(Structure): _fields_ = [("X", c_short), ("Y", c_short)] class SMALL_RECT(Structure): _fields_ = [("Left", c_short), ("Top", c_short), ("Right", c_short), ("Bottom", c_short)] class CONSOLE_SCREEN_BUFFER_INFO(Structure): _fields_ = [("Size", COORD), ("CursorPosition", COORD), ("Attributes", c_ushort), ("Window", SMALL_RECT), ("MaximumWindowSize", COORD)] class CONSOLE_CURSOR_INFO(Structure): _fields_ = [('dwSize', c_ulong), ('bVisible', c_int)] try: _type = unicode except NameError: _type = str to_int = lambda number, default: number and int(number) or default STD_OUTPUT_HANDLE = -11 STD_ERROR_HANDLE = -12 windll.kernel32.GetStdHandle.argtypes = [c_ulong] windll.kernel32.GetStdHandle.restype = c_ulong windll.kernel32.GetConsoleScreenBufferInfo.argtypes = [c_ulong, POINTER(CONSOLE_SCREEN_BUFFER_INFO)] windll.kernel32.GetConsoleScreenBufferInfo.restype = c_long windll.kernel32.SetConsoleTextAttribute.argtypes = [c_ulong, c_ushort] windll.kernel32.SetConsoleTextAttribute.restype = c_long windll.kernel32.FillConsoleOutputCharacterW.argtypes = [c_ulong, c_wchar, c_ulong, POINTER(COORD), POINTER(c_ulong)] windll.kernel32.FillConsoleOutputCharacterW.restype = c_long windll.kernel32.FillConsoleOutputAttribute.argtypes = [c_ulong, c_ushort, c_ulong, POINTER(COORD), POINTER(c_ulong) ] windll.kernel32.FillConsoleOutputAttribute.restype = c_long windll.kernel32.SetConsoleCursorPosition.argtypes = [c_ulong, POINTER(COORD) ] windll.kernel32.SetConsoleCursorPosition.restype = 
c_long windll.kernel32.SetConsoleCursorInfo.argtypes = [c_ulong, POINTER(CONSOLE_CURSOR_INFO)] windll.kernel32.SetConsoleCursorInfo.restype = c_long class AnsiTerm(object): """ emulate a vt100 terminal in cmd.exe """ def __init__(self, s): self.stream = s try: self.errors = s.errors except AttributeError: pass # python2.5 self.encoding = s.encoding self.cursor_history = [] handle = (s.fileno() == 2) and STD_ERROR_HANDLE or STD_OUTPUT_HANDLE self.hconsole = windll.kernel32.GetStdHandle(handle) self._sbinfo = CONSOLE_SCREEN_BUFFER_INFO() self._csinfo = CONSOLE_CURSOR_INFO() windll.kernel32.GetConsoleCursorInfo(self.hconsole, byref(self._csinfo)) # just to double check that the console is usable self._orig_sbinfo = CONSOLE_SCREEN_BUFFER_INFO() r = windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole, byref(self._orig_sbinfo)) self._isatty = r == 1 def screen_buffer_info(self): """ Updates self._sbinfo and returns it """ windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole, byref(self._sbinfo)) return self._sbinfo def clear_line(self, param): mode = param and int(param) or 0 sbinfo = self.screen_buffer_info() if mode == 1: # Clear from beginning of line to cursor position line_start = COORD(0, sbinfo.CursorPosition.Y) line_length = sbinfo.Size.X elif mode == 2: # Clear entire line line_start = COORD(sbinfo.CursorPosition.X, sbinfo.CursorPosition.Y) line_length = sbinfo.Size.X - sbinfo.CursorPosition.X else: # Clear from cursor position to end of line line_start = sbinfo.CursorPosition line_length = sbinfo.Size.X - sbinfo.CursorPosition.X chars_written = c_ulong() windll.kernel32.FillConsoleOutputCharacterW(self.hconsole, c_wchar(' '), line_length, line_start, byref(chars_written)) windll.kernel32.FillConsoleOutputAttribute(self.hconsole, sbinfo.Attributes, line_length, line_start, byref(chars_written)) def clear_screen(self, param): mode = to_int(param, 0) sbinfo = self.screen_buffer_info() if mode == 1: # Clear from beginning of screen to cursor position clear_start = COORD(0, 0) clear_length = sbinfo.CursorPosition.X * sbinfo.CursorPosition.Y elif mode == 2: # Clear entire screen and return cursor to home clear_start = COORD(0, 0) clear_length = sbinfo.Size.X * sbinfo.Size.Y windll.kernel32.SetConsoleCursorPosition(self.hconsole, clear_start) else: # Clear from cursor position to end of screen clear_start = sbinfo.CursorPosition clear_length = ((sbinfo.Size.X - sbinfo.CursorPosition.X) + sbinfo.Size.X * (sbinfo.Size.Y - sbinfo.CursorPosition.Y)) chars_written = c_ulong() windll.kernel32.FillConsoleOutputCharacterW(self.hconsole, c_wchar(' '), clear_length, clear_start, byref(chars_written)) windll.kernel32.FillConsoleOutputAttribute(self.hconsole, sbinfo.Attributes, clear_length, clear_start, byref(chars_written)) def push_cursor(self, param): sbinfo = self.screen_buffer_info() self.cursor_history.append(sbinfo.CursorPosition) def pop_cursor(self, param): if self.cursor_history: old_pos = self.cursor_history.pop() windll.kernel32.SetConsoleCursorPosition(self.hconsole, old_pos) def set_cursor(self, param): y, sep, x = param.partition(';') x = to_int(x, 1) - 1 y = to_int(y, 1) - 1 sbinfo = self.screen_buffer_info() new_pos = COORD( min(max(0, x), sbinfo.Size.X), min(max(0, y), sbinfo.Size.Y) ) windll.kernel32.SetConsoleCursorPosition(self.hconsole, new_pos) def set_column(self, param): x = to_int(param, 1) - 1 sbinfo = self.screen_buffer_info() new_pos = COORD( min(max(0, x), sbinfo.Size.X), sbinfo.CursorPosition.Y ) windll.kernel32.SetConsoleCursorPosition(self.hconsole, new_pos) def 
move_cursor(self, x_offset=0, y_offset=0): sbinfo = self.screen_buffer_info() new_pos = COORD( min(max(0, sbinfo.CursorPosition.X + x_offset), sbinfo.Size.X), min(max(0, sbinfo.CursorPosition.Y + y_offset), sbinfo.Size.Y) ) windll.kernel32.SetConsoleCursorPosition(self.hconsole, new_pos) def move_up(self, param): self.move_cursor(y_offset = -to_int(param, 1)) def move_down(self, param): self.move_cursor(y_offset = to_int(param, 1)) def move_left(self, param): self.move_cursor(x_offset = -to_int(param, 1)) def move_right(self, param): self.move_cursor(x_offset = to_int(param, 1)) def next_line(self, param): sbinfo = self.screen_buffer_info() self.move_cursor( x_offset = -sbinfo.CursorPosition.X, y_offset = to_int(param, 1) ) def prev_line(self, param): sbinfo = self.screen_buffer_info() self.move_cursor( x_offset = -sbinfo.CursorPosition.X, y_offset = -to_int(param, 1) ) def rgb2bgr(self, c): return ((c&1) << 2) | (c&2) | ((c&4)>>2) def set_color(self, param): cols = param.split(';') sbinfo = self.screen_buffer_info() attr = sbinfo.Attributes for c in cols: c = to_int(c, 0) if 29 < c < 38: # fgcolor attr = (attr & 0xfff0) | self.rgb2bgr(c - 30) elif 39 < c < 48: # bgcolor attr = (attr & 0xff0f) | (self.rgb2bgr(c - 40) << 4) elif c == 0: # reset attr = self._orig_sbinfo.Attributes elif c == 1: # strong attr |= 0x08 elif c == 4: # blink not available -> bg intensity attr |= 0x80 elif c == 7: # negative attr = (attr & 0xff88) | ((attr & 0x70) >> 4) | ((attr & 0x07) << 4) windll.kernel32.SetConsoleTextAttribute(self.hconsole, attr) def show_cursor(self,param): self._csinfo.bVisible = 1 windll.kernel32.SetConsoleCursorInfo(self.hconsole, byref(self._csinfo)) def hide_cursor(self,param): self._csinfo.bVisible = 0 windll.kernel32.SetConsoleCursorInfo(self.hconsole, byref(self._csinfo)) ansi_command_table = { 'A': move_up, 'B': move_down, 'C': move_right, 'D': move_left, 'E': next_line, 'F': prev_line, 'G': set_column, 'H': set_cursor, 'f': set_cursor, 'J': clear_screen, 'K': clear_line, 'h': show_cursor, 'l': hide_cursor, 'm': set_color, 's': push_cursor, 'u': pop_cursor, } # Match either the escape sequence or text not containing escape sequence ansi_tokens = re.compile(r'(?:\x1b\[([0-9?;]*)([a-zA-Z])|([^\x1b]+))') def write(self, text): try: wlock.acquire() if self._isatty: for param, cmd, txt in self.ansi_tokens.findall(text): if cmd: cmd_func = self.ansi_command_table.get(cmd) if cmd_func: cmd_func(self, param) else: self.writeconsole(txt) else: # no support for colors in the console, just output the text: # eclipse or msys may be able to interpret the escape sequences self.stream.write(text) finally: wlock.release() def writeconsole(self, txt): chars_written = c_ulong() writeconsole = windll.kernel32.WriteConsoleA if isinstance(txt, _type): writeconsole = windll.kernel32.WriteConsoleW # MSDN says that there is a shared buffer of 64 KB for the console # writes. 
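# Editor's illustration (not original text): a write such as
#   sys.stdout.write('\x1b[32mok\x1b[0m\n')
# reaching AnsiTerm.write() above is parsed by ansi_tokens and rendered through
# SetConsoleTextAttribute instead of printing the escape bytes raw.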
Attempt to not get ERROR_NOT_ENOUGH_MEMORY, see waf issue #746 done = 0 todo = len(txt) chunk = 32<<10 while todo != 0: doing = min(chunk, todo) buf = txt[done:done+doing] r = writeconsole(self.hconsole, buf, doing, byref(chars_written), None) if r == 0: chunk >>= 1 continue done += doing todo -= doing def fileno(self): return self.stream.fileno() def flush(self): pass def isatty(self): return self._isatty if sys.stdout.isatty() or sys.stderr.isatty(): handle = sys.stdout.isatty() and STD_OUTPUT_HANDLE or STD_ERROR_HANDLE console = windll.kernel32.GetStdHandle(handle) sbinfo = CONSOLE_SCREEN_BUFFER_INFO() def get_term_cols(): windll.kernel32.GetConsoleScreenBufferInfo(console, byref(sbinfo)) # Issue 1401 - the progress bar cannot reach the last character return sbinfo.Size.X - 1 # just try and see try: import struct, fcntl, termios except ImportError: pass else: if (sys.stdout.isatty() or sys.stderr.isatty()) and os.environ.get('TERM', '') not in ('dumb', 'emacs'): FD = sys.stdout.isatty() and sys.stdout.fileno() or sys.stderr.fileno() def fun(): return struct.unpack("HHHH", fcntl.ioctl(FD, termios.TIOCGWINSZ, struct.pack("HHHH", 0, 0, 0, 0)))[1] try: fun() except Exception as e: pass else: get_term_cols = fun tdb-1.4.2/third_party/waf/waflib/extras/__init__.py0000660000000000000000000000010713444661622022222 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005-2010 (ita) tdb-1.4.2/third_party/waf/waflib/extras/batched_cc.py0000660000000000000000000001112613444661622022525 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2015 (ita) """ Instead of compiling object files one by one, c/c++ compilers are often able to compile at once: cc -c ../file1.c ../file2.c ../file3.c Files are output on the directory where the compiler is called, and dependencies are more difficult to track (do not run the command on all source files if only one file changes) As such, we do as if the files were compiled one by one, but no command is actually run: replace each cc/cpp Task by a TaskSlave. A new task called TaskMaster collects the signatures from each slave and finds out the command-line to run. Just import this module to start using it: def build(bld): bld.load('batched_cc') Note that this is provided as an example, unity builds are recommended for best performance results (fewer tasks and fewer jobs to execute). See waflib/extras/unity.py. 
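A fuller wscript sketch (the source file names are purely illustrative):

	def options(opt):
		opt.load('compiler_c')

	def configure(conf):
		conf.load('compiler_c')

	def build(bld):
		bld.load('batched_cc')
		bld.program(source='a.c b.c c.c', target='app')

With the module loaded, the per-file c/cxx tasks become slaves and one batch
task per output directory runs the actual compiler command.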
""" from waflib import Task, Utils from waflib.TaskGen import extension, feature, after_method from waflib.Tools import c, cxx MAX_BATCH = 50 c_str = '${CC} ${ARCH_ST:ARCH} ${CFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${tsk.batch_incpaths()} ${DEFINES_ST:DEFINES} -c ${SRCLST} ${CXX_TGT_F_BATCHED} ${CPPFLAGS}' c_fun, _ = Task.compile_fun_noshell(c_str) cxx_str = '${CXX} ${ARCH_ST:ARCH} ${CXXFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${tsk.batch_incpaths()} ${DEFINES_ST:DEFINES} -c ${SRCLST} ${CXX_TGT_F_BATCHED} ${CPPFLAGS}' cxx_fun, _ = Task.compile_fun_noshell(cxx_str) count = 70000 class batch(Task.Task): color = 'PINK' after = ['c', 'cxx'] before = ['cprogram', 'cshlib', 'cstlib', 'cxxprogram', 'cxxshlib', 'cxxstlib'] def uid(self): return Utils.h_list([Task.Task.uid(self), self.generator.idx, self.generator.path.abspath(), self.generator.target]) def __str__(self): return 'Batch compilation for %d slaves' % len(self.slaves) def __init__(self, *k, **kw): Task.Task.__init__(self, *k, **kw) self.slaves = [] self.inputs = [] self.hasrun = 0 global count count += 1 self.idx = count def add_slave(self, slave): self.slaves.append(slave) self.set_run_after(slave) def runnable_status(self): for t in self.run_after: if not t.hasrun: return Task.ASK_LATER for t in self.slaves: #if t.executed: if t.hasrun != Task.SKIPPED: return Task.RUN_ME return Task.SKIP_ME def get_cwd(self): return self.slaves[0].outputs[0].parent def batch_incpaths(self): st = self.env.CPPPATH_ST return [st % node.abspath() for node in self.generator.includes_nodes] def run(self): self.outputs = [] srclst = [] slaves = [] for t in self.slaves: if t.hasrun != Task.SKIPPED: slaves.append(t) srclst.append(t.inputs[0].abspath()) self.env.SRCLST = srclst if self.slaves[0].__class__.__name__ == 'c': ret = c_fun(self) else: ret = cxx_fun(self) if ret: return ret for t in slaves: t.old_post_run() def hook(cls_type): def n_hook(self, node): ext = '.obj' if self.env.CC_NAME == 'msvc' else '.o' name = node.name k = name.rfind('.') if k >= 0: basename = name[:k] + ext else: basename = name + ext outdir = node.parent.get_bld().make_node('%d' % self.idx) outdir.mkdir() out = outdir.find_or_declare(basename) task = self.create_task(cls_type, node, out) try: self.compiled_tasks.append(task) except AttributeError: self.compiled_tasks = [task] if not getattr(self, 'masters', None): self.masters = {} self.allmasters = [] def fix_path(tsk): if self.env.CC_NAME == 'msvc': tsk.env.append_unique('CXX_TGT_F_BATCHED', '/Fo%s\\' % outdir.abspath()) if not node.parent in self.masters: m = self.masters[node.parent] = self.master = self.create_task('batch') fix_path(m) self.allmasters.append(m) else: m = self.masters[node.parent] if len(m.slaves) > MAX_BATCH: m = self.masters[node.parent] = self.master = self.create_task('batch') fix_path(m) self.allmasters.append(m) m.add_slave(task) return task return n_hook extension('.c')(hook('c')) extension('.cpp','.cc','.cxx','.C','.c++')(hook('cxx')) @feature('cprogram', 'cshlib', 'cstaticlib', 'cxxprogram', 'cxxshlib', 'cxxstlib') @after_method('apply_link') def link_after_masters(self): if getattr(self, 'allmasters', None): for m in self.allmasters: self.link_task.set_run_after(m) # Modify the c and cxx task classes - in theory it would be best to # create subclasses and to re-map the c/c++ extensions for x in ('c', 'cxx'): t = Task.classes[x] def run(self): pass def post_run(self): pass setattr(t, 'oldrun', getattr(t, 'run', None)) setattr(t, 'run', run) setattr(t, 'old_post_run', t.post_run) setattr(t, 
'post_run', post_run) tdb-1.4.2/third_party/waf/waflib/extras/biber.py0000660000000000000000000000313513444661622021552 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2011 (ita) """ Latex processing using "biber" """ import os from waflib import Task, Logs from waflib.Tools import tex as texmodule class tex(texmodule.tex): biber_fun, _ = Task.compile_fun('${BIBER} ${BIBERFLAGS} ${SRCFILE}',shell=False) biber_fun.__doc__ = """ Execute the program **biber** """ def bibfile(self): return None def bibunits(self): self.env.env = {} self.env.env.update(os.environ) self.env.env.update({'BIBINPUTS': self.texinputs(), 'BSTINPUTS': self.texinputs()}) self.env.SRCFILE = self.aux_nodes[0].name[:-4] if not self.env['PROMPT_LATEX']: self.env.append_unique('BIBERFLAGS', '--quiet') path = self.aux_nodes[0].abspath()[:-4] + '.bcf' if os.path.isfile(path): Logs.warn('calling biber') self.check_status('error when calling biber, check %s.blg for errors' % (self.env.SRCFILE), self.biber_fun()) else: super(tex, self).bibfile() super(tex, self).bibunits() class latex(tex): texfun, vars = Task.compile_fun('${LATEX} ${LATEXFLAGS} ${SRCFILE}', shell=False) class pdflatex(tex): texfun, vars = Task.compile_fun('${PDFLATEX} ${PDFLATEXFLAGS} ${SRCFILE}', shell=False) class xelatex(tex): texfun, vars = Task.compile_fun('${XELATEX} ${XELATEXFLAGS} ${SRCFILE}', shell=False) def configure(self): """ Almost the same as in tex.py, but try to detect 'biber' """ v = self.env for p in ' biber tex latex pdflatex xelatex bibtex dvips dvipdf ps2pdf makeindex pdf2ps'.split(): try: self.find_program(p, var=p.upper()) except self.errors.ConfigurationError: pass v['DVIPSFLAGS'] = '-Ppdf' tdb-1.4.2/third_party/waf/waflib/extras/bjam.py0000660000000000000000000000746513444661622021402 0ustar rootroot00000000000000#! /usr/bin/env python # per rosengren 2011 from os import sep, readlink from waflib import Logs from waflib.TaskGen import feature, after_method from waflib.Task import Task, always_run def options(opt): grp = opt.add_option_group('Bjam Options') grp.add_option('--bjam_src', default=None, help='You can find it in <boost source tree>/tools/jam/src') grp.add_option('--bjam_uname', default='linuxx86_64', help='bjam is built in <bjam_src>/bin.<bjam_uname>/bjam') grp.add_option('--bjam_config', default=None) grp.add_option('--bjam_toolset', default=None) def configure(cnf): if not cnf.env.BJAM_SRC: cnf.env.BJAM_SRC = cnf.options.bjam_src if not cnf.env.BJAM_UNAME: cnf.env.BJAM_UNAME = cnf.options.bjam_uname try: cnf.find_program('bjam', path_list=[ cnf.env.BJAM_SRC + sep + 'bin.' + cnf.env.BJAM_UNAME ]) except Exception: cnf.env.BJAM = None if not cnf.env.BJAM_CONFIG: cnf.env.BJAM_CONFIG = cnf.options.bjam_config if not cnf.env.BJAM_TOOLSET: cnf.env.BJAM_TOOLSET = cnf.options.bjam_toolset @feature('bjam') @after_method('process_rule') def process_bjam(self): if not self.bld.env.BJAM: self.create_task('bjam_creator') self.create_task('bjam_build') self.create_task('bjam_installer') if getattr(self, 'always', False): always_run(bjam_creator) always_run(bjam_build) always_run(bjam_installer) class bjam_creator(Task): ext_out = 'bjam_exe' vars=['BJAM_SRC', 'BJAM_UNAME'] def run(self): env = self.env gen = self.generator bjam = gen.bld.root.find_dir(env.BJAM_SRC) if not bjam: Logs.error('Cannot find bjam source') return -1 bjam_exe_relpath = 'bin.'
+ env.BJAM_UNAME + '/bjam' bjam_exe = bjam.find_resource(bjam_exe_relpath) if bjam_exe: env.BJAM = bjam_exe.srcpath() return 0 bjam_cmd = ['./build.sh'] Logs.debug('runner: ' + bjam.srcpath() + '> ' + str(bjam_cmd)) result = self.exec_command(bjam_cmd, cwd=bjam.srcpath()) if not result == 0: Logs.error('bjam failed') return -1 bjam_exe = bjam.find_resource(bjam_exe_relpath) if bjam_exe: env.BJAM = bjam_exe.srcpath() return 0 Logs.error('bjam failed') return -1 class bjam_build(Task): ext_in = 'bjam_exe' ext_out = 'install' vars = ['BJAM_TOOLSET'] def run(self): env = self.env gen = self.generator path = gen.path bld = gen.bld if hasattr(gen, 'root'): build_root = path.find_node(gen.root) else: build_root = path jam = bld.srcnode.find_resource(env.BJAM_CONFIG) if jam: Logs.debug('bjam: Using jam configuration from ' + jam.srcpath()) jam_rel = jam.relpath_gen(build_root) else: Logs.warn('No build configuration in build_config/user-config.jam. Using default') jam_rel = None bjam_exe = bld.srcnode.find_node(env.BJAM) if not bjam_exe: Logs.error('env.BJAM is not set') return -1 bjam_exe_rel = bjam_exe.relpath_gen(build_root) cmd = ([bjam_exe_rel] + (['--user-config=' + jam_rel] if jam_rel else []) + ['--stagedir=' + path.get_bld().path_from(build_root)] + ['--debug-configuration'] + ['--with-' + lib for lib in self.generator.target] + (['toolset=' + env.BJAM_TOOLSET] if env.BJAM_TOOLSET else []) + ['link=' + 'shared'] + ['variant=' + 'release'] ) Logs.debug('runner: ' + build_root.srcpath() + '> ' + str(cmd)) ret = self.exec_command(cmd, cwd=build_root.srcpath()) if ret != 0: return ret self.set_outputs(path.get_bld().ant_glob('lib/*') + path.get_bld().ant_glob('bin/*')) return 0 class bjam_installer(Task): ext_in = 'install' def run(self): gen = self.generator path = gen.path for idir, pat in (('${LIBDIR}', 'lib/*'), ('${BINDIR}', 'bin/*')): files = [] for n in path.get_bld().ant_glob(pat): try: t = readlink(n.srcpath()) gen.bld.symlink_as(sep.join([idir, n.name]), t, postpone=False) except OSError: files.append(n) gen.bld.install_files(idir, files, postpone=False) return 0 tdb-1.4.2/third_party/waf/waflib/extras/blender.py0000660000000000000000000000577513444661622022116 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Michal Proszek, 2014 (poxip) """ Detect the version of Blender, path and install the extension: def options(opt): opt.load('blender') def configure(cnf): cnf.load('blender') def build(bld): bld(name='io_mesh_raw', feature='blender', files=['file1.py', 'file2.py'] ) If name variable is empty, files are installed in scripts/addons, otherwise scripts/addons/name Use ./waf configure --system to set the installation directory to system path """ import os import re from getpass import getuser from waflib import Utils from waflib.TaskGen import feature from waflib.Configure import conf def options(opt): opt.add_option( '-s', '--system', dest='directory_system', default=False, action='store_true', help='determines installation directory (default: user)' ) @conf def find_blender(ctx): '''Return version number of blender, if not exist return None''' blender = ctx.find_program('blender') output = ctx.cmd_and_log(blender + ['--version']) m = re.search(r'Blender\s*((\d+(\.|))*)', output) if not m: ctx.fatal('Could not retrieve blender version') try: blender_version = m.group(1) except IndexError: ctx.fatal('Could not retrieve blender version') ctx.env['BLENDER_VERSION'] = blender_version return blender @conf def configure_paths(ctx): """Setup blender paths""" # Get the 
username user = getuser() _platform = Utils.unversioned_sys_platform() config_path = {'user': '', 'system': ''} if _platform.startswith('linux'): config_path['user'] = '/home/%s/.config/blender/' % user config_path['system'] = '/usr/share/blender/' elif _platform == 'darwin': # MAC OS X config_path['user'] = \ '/Users/%s/Library/Application Support/Blender/' % user config_path['system'] = '/Library/Application Support/Blender/' elif Utils.is_win32: # Windows appdata_path = ctx.getenv('APPDATA').replace('\\', '/') homedrive = ctx.getenv('HOMEDRIVE').replace('\\', '/') config_path['user'] = '%s/Blender Foundation/Blender/' % appdata_path config_path['system'] = \ '%sAll Users/AppData/Roaming/Blender Foundation/Blender/' % homedrive else: ctx.fatal( 'Unsupported platform. ' 'Available platforms: Linux, OSX, MS-Windows.' ) blender_version = ctx.env['BLENDER_VERSION'] config_path['user'] += blender_version + '/' config_path['system'] += blender_version + '/' ctx.env['BLENDER_CONFIG_DIR'] = os.path.abspath(config_path['user']) if ctx.options.directory_system: ctx.env['BLENDER_CONFIG_DIR'] = config_path['system'] ctx.env['BLENDER_ADDONS_DIR'] = os.path.join( ctx.env['BLENDER_CONFIG_DIR'], 'scripts/addons' ) Utils.check_dir(ctx.env['BLENDER_ADDONS_DIR']) def configure(ctx): ctx.find_blender() ctx.configure_paths() @feature('blender') def blender(self): # Two ways to install a blender extension: as a module or just .py files dest_dir = os.path.join(self.env.BLENDER_ADDONS_DIR, self.get_name()) Utils.check_dir(dest_dir) self.add_install_files(install_to=dest_dir, install_from=getattr(self, 'files', '.')) tdb-1.4.2/third_party/waf/waflib/extras/boo.py0000660000000000000000000000435013444661622021246 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Yannick LM 2011 """ Support for the boo programming language, for example:: bld(features = "boo", # necessary feature source = "src.boo", # list of boo files gen = "world.dll", # target type = "library", # library/exe ("-target:xyz" flag) name = "world" # necessary if the target is referenced by 'use' ) """ from waflib import Task from waflib.Configure import conf from waflib.TaskGen import feature, after_method, before_method, extension @extension('.boo') def boo_hook(self, node): # Nothing here yet ... # TODO filter the non-boo source files in 'apply_booc' and remove this method pass @feature('boo') @before_method('process_source') def apply_booc(self): """Create a booc task """ src_nodes = self.to_nodes(self.source) out_node = self.path.find_or_declare(self.gen) self.boo_task = self.create_task('booc', src_nodes, [out_node]) # Set variables used by the 'booc' task self.boo_task.env.OUT = '-o:%s' % out_node.abspath() # type is "exe" by default type = getattr(self, "type", "exe") self.boo_task.env.BOO_TARGET_TYPE = "-target:%s" % type @feature('boo') @after_method('apply_booc') def use_boo(self): """ boo applications honor the **use** keyword:: """ dep_names = self.to_list(getattr(self, 'use', [])) for dep_name in dep_names: dep_task_gen = self.bld.get_tgen_by_name(dep_name) if not dep_task_gen: continue dep_task_gen.post() dep_task = getattr(dep_task_gen, 'boo_task', None) if not dep_task: # Try a cs task: dep_task = getattr(dep_task_gen, 'cs_task', None) if not dep_task: # Try a link task: dep_task = getattr(dep_task_gen, 'link_task', None) if not dep_task: # Abort ... 
continue self.boo_task.set_run_after(dep_task) # order self.boo_task.dep_nodes.extend(dep_task.outputs) # dependency self.boo_task.env.append_value('BOO_FLAGS', '-reference:%s' % dep_task.outputs[0].abspath()) class booc(Task.Task): """Compiles .boo files """ color = 'YELLOW' run_str = '${BOOC} ${BOO_FLAGS} ${BOO_TARGET_TYPE} ${OUT} ${SRC}' @conf def check_booc(self): self.find_program('booc', var='BOOC') self.env.BOO_FLAGS = ['-nologo'] def configure(self): """Check that booc is available """ self.check_booc() tdb-1.4.2/third_party/waf/waflib/extras/boost.py0000660000000000000000000004402513444661622021620 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # # partially based on boost.py written by Gernot Vormayr # written by Ruediger Sonderfeld, 2008 # modified by Bjoern Michaelsen, 2008 # modified by Luca Fossati, 2008 # rewritten for waf 1.5.1, Thomas Nagy, 2008 # rewritten for waf 1.6.2, Sylvain Rouquette, 2011 ''' This is an extra tool, not bundled with the default waf binary. To add the boost tool to the waf file: $ ./waf-light --tools=compat15,boost or, if you have waf >= 1.6.2 $ ./waf update --files=boost When using this tool, the wscript will look like: def options(opt): opt.load('compiler_cxx boost') def configure(conf): conf.load('compiler_cxx boost') conf.check_boost(lib='system filesystem') def build(bld): bld(source='main.cpp', target='app', use='BOOST') Options are generated to specify the location of the boost includes/libraries. The `check_boost` configuration function lets you specify which boost libraries to use. It can also provide defaults for the --boost-* command-line options (such as --boost-mt). Everything will be packaged together in a BOOST component that you can use. When using MSVC, a lot of compilation flags need to match your BOOST build configuration: - you may have to add /EHsc to your CXXFLAGS or define boost::throw_exception if BOOST_NO_EXCEPTIONS is defined (errors: C4530). - boost libraries will try to be smart and use the (pretty but often not useful) auto-linking feature of MSVC, so before calling `conf.check_boost` you may want to disable it by adding: conf.env.DEFINES_BOOST += ['BOOST_ALL_NO_LIB'] - boost might also be compiled with /MT, which links the runtime statically. If you have problems with redefined symbols: self.env['DEFINES_%s' % var] += ['BOOST_ALL_NO_LIB'] self.env['CXXFLAGS_%s' % var] += ['/MD', '/EHsc'] Passing `--boost-linkage_autodetect` may help obtain correct linkage in some basic cases. 
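A configure-time sketch for static multi-threaded variants (the library names
and keyword values below are just an example of the parameters check_boost
accepts):

	def configure(conf):
		conf.load('compiler_cxx boost')
		conf.check_boost(stlib='system filesystem', mt=True)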
''' import sys import re from waflib import Utils, Logs, Errors from waflib.Configure import conf from waflib.TaskGen import feature, after_method BOOST_LIBS = ['/usr/lib', '/usr/local/lib', '/opt/local/lib', '/sw/lib', '/lib'] BOOST_INCLUDES = ['/usr/include', '/usr/local/include', '/opt/local/include', '/sw/include'] BOOST_VERSION_FILE = 'boost/version.hpp' BOOST_VERSION_CODE = ''' #include <iostream> #include <boost/version.hpp> int main() { std::cout << BOOST_LIB_VERSION << ":" << BOOST_VERSION << std::endl; } ''' BOOST_ERROR_CODE = ''' #include <boost/system/error_code.hpp> int main() { boost::system::error_code c; } ''' PTHREAD_CODE = ''' #include <pthread.h> static void* f(void*) { return 0; } int main() { pthread_t th; pthread_attr_t attr; pthread_attr_init(&attr); pthread_create(&th, &attr, &f, 0); pthread_join(th, 0); pthread_cleanup_push(0, 0); pthread_cleanup_pop(0); pthread_attr_destroy(&attr); } ''' BOOST_THREAD_CODE = ''' #include <boost/thread.hpp> int main() { boost::thread t; } ''' BOOST_LOG_CODE = ''' #include <boost/log/trivial.hpp> #include <boost/log/utility/setup/console.hpp> #include <boost/log/utility/setup/common_attributes.hpp> int main() { using namespace boost::log; add_common_attributes(); add_console_log(std::clog, keywords::format = "%Message%"); BOOST_LOG_TRIVIAL(debug) << "log is working" << std::endl; } ''' # toolsets from {boost_dir}/tools/build/v2/tools/common.jam PLATFORM = Utils.unversioned_sys_platform() detect_intel = lambda env: (PLATFORM == 'win32') and 'iw' or 'il' detect_clang = lambda env: (PLATFORM == 'darwin') and 'clang-darwin' or 'clang' detect_mingw = lambda env: (re.search('MinGW', env.CXX[0])) and 'mgw' or 'gcc' BOOST_TOOLSETS = { 'borland': 'bcb', 'clang': detect_clang, 'como': 'como', 'cw': 'cw', 'darwin': 'xgcc', 'edg': 'edg', 'g++': detect_mingw, 'gcc': detect_mingw, 'icpc': detect_intel, 'intel': detect_intel, 'kcc': 'kcc', 'kylix': 'bck', 'mipspro': 'mp', 'mingw': 'mgw', 'msvc': 'vc', 'qcc': 'qcc', 'sun': 'sw', 'sunc++': 'sw', 'tru64cxx': 'tru', 'vacpp': 'xlc' } def options(opt): opt = opt.add_option_group('Boost Options') opt.add_option('--boost-includes', type='string', default='', dest='boost_includes', help='''path to the directory where the boost includes are, e.g., /path/to/boost_1_55_0/stage/include''') opt.add_option('--boost-libs', type='string', default='', dest='boost_libs', help='''path to the directory where the boost libs are, e.g., path/to/boost_1_55_0/stage/lib''') opt.add_option('--boost-mt', action='store_true', default=False, dest='boost_mt', help='select multi-threaded libraries') opt.add_option('--boost-abi', type='string', default='', dest='boost_abi', help='''select libraries with tags (gd for debug, static is automatically added), see doc Boost, Getting Started, chapter 6.1''') opt.add_option('--boost-linkage_autodetect', action="store_true", dest='boost_linkage_autodetect', help="auto-detect boost linkage options (don't get used to it / might break other stuff)") opt.add_option('--boost-toolset', type='string', default='', dest='boost_toolset', help='force a toolset e.g. 
msvc, vc90, \ gcc, mingw, mgw45 (default: auto)') py_version = '%d%d' % (sys.version_info[0], sys.version_info[1]) opt.add_option('--boost-python', type='string', default=py_version, dest='boost_python', help='select the lib python with this version \ (default: %s)' % py_version) @conf def __boost_get_version_file(self, d): if not d: return None dnode = self.root.find_dir(d) if dnode: return dnode.find_node(BOOST_VERSION_FILE) return None @conf def boost_get_version(self, d): """silently retrieve the boost version number""" node = self.__boost_get_version_file(d) if node: try: txt = node.read() except EnvironmentError: Logs.error("Could not read the file %r", node.abspath()) else: re_but1 = re.compile('^#define\\s+BOOST_LIB_VERSION\\s+"(.+)"', re.M) m1 = re_but1.search(txt) re_but2 = re.compile('^#define\\s+BOOST_VERSION\\s+(\\d+)', re.M) m2 = re_but2.search(txt) if m1 and m2: return (m1.group(1), m2.group(1)) return self.check_cxx(fragment=BOOST_VERSION_CODE, includes=[d], execute=True, define_ret=True).split(":") @conf def boost_get_includes(self, *k, **kw): includes = k and k[0] or kw.get('includes') if includes and self.__boost_get_version_file(includes): return includes for d in self.environ.get('INCLUDE', '').split(';') + BOOST_INCLUDES: if self.__boost_get_version_file(d): return d if includes: self.end_msg('headers not found in %s' % includes) self.fatal('The configuration failed') else: self.end_msg('headers not found, please provide a --boost-includes argument (see help)') self.fatal('The configuration failed') @conf def boost_get_toolset(self, cc): toolset = cc if not cc: build_platform = Utils.unversioned_sys_platform() if build_platform in BOOST_TOOLSETS: cc = build_platform else: cc = self.env.CXX_NAME if cc in BOOST_TOOLSETS: toolset = BOOST_TOOLSETS[cc] return isinstance(toolset, str) and toolset or toolset(self.env) @conf def __boost_get_libs_path(self, *k, **kw): ''' return the lib path and all the files in it ''' if 'files' in kw: return self.root.find_dir('.'), Utils.to_list(kw['files']) libs = k and k[0] or kw.get('libs') if libs: path = self.root.find_dir(libs) files = path.ant_glob('*boost_*') if not libs or not files: for d in self.environ.get('LIB', '').split(';') + BOOST_LIBS: if not d: continue path = self.root.find_dir(d) if path: files = path.ant_glob('*boost_*') if files: break path = self.root.find_dir(d + '64') if path: files = path.ant_glob('*boost_*') if files: break if not path: if libs: self.end_msg('libs not found in %s' % libs) self.fatal('The configuration failed') else: self.end_msg('libs not found, please provide a --boost-libs argument (see help)') self.fatal('The configuration failed') self.to_log('Found the boost path in %r with the libraries:' % path) for x in files: self.to_log(' %r' % x) return path, files @conf def boost_get_libs(self, *k, **kw): ''' return the lib path and the required libs according to the parameters ''' path, files = self.__boost_get_libs_path(**kw) files = sorted(files, key=lambda f: (len(f.name), f.name), reverse=True) toolset = self.boost_get_toolset(kw.get('toolset', '')) toolset_pat = '(-%s[0-9]{0,3})' % toolset version = '-%s' % self.env.BOOST_VERSION def find_lib(re_lib, files): for file in files: if re_lib.search(file.name): self.to_log('Found boost lib %s' % file) return file return None def format_lib_name(name): if name.startswith('lib') and self.env.CC_NAME != 'msvc': name = name[3:] return name[:name.rfind('.')] def match_libs(lib_names, is_static): libs = [] lib_names = Utils.to_list(lib_names) if not 
lib_names: return libs t = [] if kw.get('mt', False): t.append('-mt') if kw.get('abi'): t.append('%s%s' % (is_static and '-s' or '-', kw['abi'])) elif is_static: t.append('-s') tags_pat = t and ''.join(t) or '' ext = is_static and self.env.cxxstlib_PATTERN or self.env.cxxshlib_PATTERN ext = ext.partition('%s')[2] # remove '%s' or 'lib%s' from PATTERN for lib in lib_names: if lib == 'python': # for instance, with python='27', # accepts '-py27', '-py2', '27', '-2.7' and '2' # but will reject '-py3', '-py26', '26' and '3' tags = '({0})?((-py{2})|(-py{1}(?=[^0-9]))|({2})|(-{1}.{3})|({1}(?=[^0-9]))|(?=[^0-9])(?!-py))'.format(tags_pat, kw['python'][0], kw['python'], kw['python'][1]) else: tags = tags_pat # Trying libraries, from most strict match to least one for pattern in ['boost_%s%s%s%s%s$' % (lib, toolset_pat, tags, version, ext), 'boost_%s%s%s%s$' % (lib, tags, version, ext), # Give up trying to find the right version 'boost_%s%s%s%s$' % (lib, toolset_pat, tags, ext), 'boost_%s%s%s$' % (lib, tags, ext), 'boost_%s%s$' % (lib, ext), 'boost_%s' % lib]: self.to_log('Trying pattern %s' % pattern) file = find_lib(re.compile(pattern), files) if file: libs.append(format_lib_name(file.name)) break else: self.end_msg('lib %s not found in %s' % (lib, path.abspath())) self.fatal('The configuration failed') return libs return path.abspath(), match_libs(kw.get('lib'), False), match_libs(kw.get('stlib'), True) @conf def _check_pthread_flag(self, *k, **kw): ''' Computes which flags should be added to CXXFLAGS and LINKFLAGS to compile in multi-threading mode Yes, we *need* to put the -pthread thing in CPPFLAGS because with GCC3, boost/thread.hpp will trigger a #error if -pthread isn't used: boost/config/requires_threads.hpp:47:5: #error "Compiler threading support is not turned on. Please set the correct command line options for threading: -pthread (Linux), -pthreads (Solaris) or -mthreads (Mingw32)" Based on _BOOST_PTHREAD_FLAG(): https://github.com/tsuna/boost.m4/blob/master/build-aux/boost.m4 ''' var = kw.get('uselib_store', 'BOOST') self.start_msg('Checking the flags needed to use pthreads') # The ordering *is* (sometimes) important. Some notes on the # individual items follow: # (none): in case threads are in libc; should be tried before -Kthread and # other compiler flags to prevent continual compiler warnings # -lpthreads: AIX (must check this before -lpthread) # -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h) # -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able) # -llthread: LinuxThreads port on FreeBSD (also preferred to -pthread) # -pthread: GNU Linux/GCC (kernel threads), BSD/GCC (userland threads) # -pthreads: Solaris/GCC # -mthreads: MinGW32/GCC, Lynx/GCC # -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it # doesn't hurt to check since this sometimes defines pthreads too; # also defines -D_REENTRANT) # ... -mt is also the pthreads flag for HP/aCC # -lpthread: GNU Linux, etc. # --thread-safe: KAI C++ if Utils.unversioned_sys_platform() == "sunos": # On Solaris (at least, for some versions), libc contains stubbed # (non-functional) versions of the pthreads routines, so link-based # tests will erroneously succeed. (We need to link with -pthreads/-mt/ # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather # a function called by this macro, so we could check for that, but # who knows whether they'll stub that too in a future libc.) 
So, # we'll just look for -pthreads and -lpthread first: boost_pthread_flags = ["-pthreads", "-lpthread", "-mt", "-pthread"] else: boost_pthread_flags = ["", "-lpthreads", "-Kthread", "-kthread", "-llthread", "-pthread", "-pthreads", "-mthreads", "-lpthread", "--thread-safe", "-mt"] for boost_pthread_flag in boost_pthread_flags: try: self.env.stash() self.env.append_value('CXXFLAGS_%s' % var, boost_pthread_flag) self.env.append_value('LINKFLAGS_%s' % var, boost_pthread_flag) self.check_cxx(code=PTHREAD_CODE, msg=None, use=var, execute=False) self.end_msg(boost_pthread_flag) return except self.errors.ConfigurationError: self.env.revert() self.end_msg('None') @conf def check_boost(self, *k, **kw): """ Initialize boost libraries to be used. Keywords: you can pass the same parameters as with the command line (without "--boost-"). Note that the command line has the priority, and should preferably be used. """ if not self.env['CXX']: self.fatal('load a c++ compiler first, conf.load("compiler_cxx")') params = { 'lib': k and k[0] or kw.get('lib'), 'stlib': kw.get('stlib') } for key, value in self.options.__dict__.items(): if not key.startswith('boost_'): continue key = key[len('boost_'):] params[key] = value and value or kw.get(key, '') var = kw.get('uselib_store', 'BOOST') self.find_program('dpkg-architecture', var='DPKG_ARCHITECTURE', mandatory=False) if self.env.DPKG_ARCHITECTURE: deb_host_multiarch = self.cmd_and_log([self.env.DPKG_ARCHITECTURE[0], '-qDEB_HOST_MULTIARCH']) BOOST_LIBS.insert(0, '/usr/lib/%s' % deb_host_multiarch.strip()) self.start_msg('Checking boost includes') self.env['INCLUDES_%s' % var] = inc = self.boost_get_includes(**params) versions = self.boost_get_version(inc) self.env.BOOST_VERSION = versions[0] self.env.BOOST_VERSION_NUMBER = int(versions[1]) self.end_msg("%d.%d.%d" % (int(versions[1]) / 100000, int(versions[1]) / 100 % 1000, int(versions[1]) % 100)) if Logs.verbose: Logs.pprint('CYAN', ' path : %s' % self.env['INCLUDES_%s' % var]) if not params['lib'] and not params['stlib']: return if 'static' in kw or 'static' in params: Logs.warn('boost: static parameter is deprecated, use stlib instead.') self.start_msg('Checking boost libs') path, libs, stlibs = self.boost_get_libs(**params) self.env['LIBPATH_%s' % var] = [path] self.env['STLIBPATH_%s' % var] = [path] self.env['LIB_%s' % var] = libs self.env['STLIB_%s' % var] = stlibs self.end_msg('ok') if Logs.verbose: Logs.pprint('CYAN', ' path : %s' % path) Logs.pprint('CYAN', ' shared libs : %s' % libs) Logs.pprint('CYAN', ' static libs : %s' % stlibs) def has_shlib(lib): return params['lib'] and lib in params['lib'] def has_stlib(lib): return params['stlib'] and lib in params['stlib'] def has_lib(lib): return has_shlib(lib) or has_stlib(lib) if has_lib('thread'): # not inside try_link to make check visible in the output self._check_pthread_flag(k, kw) def try_link(): if has_lib('system'): self.check_cxx(fragment=BOOST_ERROR_CODE, use=var, execute=False) if has_lib('thread'): self.check_cxx(fragment=BOOST_THREAD_CODE, use=var, execute=False) if has_lib('log'): if not has_lib('thread'): self.env['DEFINES_%s' % var] += ['BOOST_LOG_NO_THREADS'] if has_shlib('log'): self.env['DEFINES_%s' % var] += ['BOOST_LOG_DYN_LINK'] self.check_cxx(fragment=BOOST_LOG_CODE, use=var, execute=False) if params.get('linkage_autodetect', False): self.start_msg("Attempting to detect boost linkage flags") toolset = self.boost_get_toolset(kw.get('toolset', '')) if toolset in ('vc',): # disable auto-linking feature, causing error LNK1181 # because 
the code wants to be linked against self.env['DEFINES_%s' % var] += ['BOOST_ALL_NO_LIB'] # if no dlls are present, we guess the .lib files are not stubs has_dlls = False for x in Utils.listdir(path): if x.endswith(self.env.cxxshlib_PATTERN % ''): has_dlls = True break if not has_dlls: self.env['STLIBPATH_%s' % var] = [path] self.env['STLIB_%s' % var] = libs del self.env['LIB_%s' % var] del self.env['LIBPATH_%s' % var] # we attempt to play with some known-to-work CXXFLAGS combinations for cxxflags in (['/MD', '/EHsc'], []): self.env.stash() self.env["CXXFLAGS_%s" % var] += cxxflags try: try_link() except Errors.ConfigurationError as e: self.env.revert() exc = e else: self.end_msg("ok: winning cxxflags combination: %s" % (self.env["CXXFLAGS_%s" % var])) exc = None self.env.commit() break if exc is not None: self.end_msg("Could not auto-detect boost linking flags combination, you may report it to boost.py author", ex=exc) self.fatal('The configuration failed') else: self.end_msg("Boost linkage flags auto-detection not implemented (needed ?) for this toolchain") self.fatal('The configuration failed') else: self.start_msg('Checking for boost linkage') try: try_link() except Errors.ConfigurationError as e: self.end_msg("Could not link against boost libraries using supplied options") self.fatal('The configuration failed') self.end_msg('ok') @feature('cxx') @after_method('apply_link') def install_boost(self): if install_boost.done or not Utils.is_win32 or not self.bld.cmd.startswith('install'): return install_boost.done = True inst_to = getattr(self, 'install_path', '${BINDIR}') for lib in self.env.LIB_BOOST: try: file = self.bld.find_file(self.env.cxxshlib_PATTERN % lib, self.env.LIBPATH_BOOST) self.add_install_files(install_to=inst_to, install_from=self.bld.root.find_node(file)) except: continue install_boost.done = False tdb-1.4.2/third_party/waf/waflib/extras/build_file_tracker.py0000660000000000000000000000160213444661622024275 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2015 """ Force files to depend on the timestamps of those located in the build directory. You may want to use this to force partial rebuilds, see playground/track_output_files/ for a working example. Note that there is a variety of ways to implement this, one may want use timestamps on source files too for example, or one may want to hash the files in the source directory only under certain conditions (md5_tstamp tool) or to hash the file in the build directory with its timestamp """ import os from waflib import Node, Utils def get_bld_sig(self): if not self.is_bld() or self.ctx.bldnode is self.ctx.srcnode: return Utils.h_file(self.abspath()) try: # add the creation time to the signature return self.sig + str(os.stat(self.abspath()).st_mtime) except AttributeError: return None Node.Node.get_bld_sig = get_bld_sig tdb-1.4.2/third_party/waf/waflib/extras/build_logs.py0000660000000000000000000000541013444661622022610 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2013 (ita) """ A system for recording all outputs to a log file. Just add the following to your wscript file:: def init(ctx): ctx.load('build_logs') """ import atexit, sys, time, os, shutil, threading from waflib import ansiterm, Logs, Context # adding the logs under the build/ directory will clash with the clean/ command try: up = os.path.dirname(Context.g_module.__file__) except AttributeError: up = '.' 
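# Usage sketch (the path below is purely illustrative): LOGFILE is computed
# once at import time, so a project that wants a fixed destination can assign
# it from its wscript before init() runs:
#
#	import os
#	from waflib.extras import build_logs
#	build_logs.LOGFILE = os.path.join('logs', 'nightly.log')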
LOGFILE = os.path.join(up, 'logs', time.strftime('%Y_%m_%d_%H_%M.log')) wlock = threading.Lock() class log_to_file(object): def __init__(self, stream, fileobj, filename): self.stream = stream self.encoding = self.stream.encoding self.fileobj = fileobj self.filename = filename self.is_valid = True def replace_colors(self, data): for x in Logs.colors_lst.values(): if isinstance(x, str): data = data.replace(x, '') return data def write(self, data): try: wlock.acquire() self.stream.write(data) self.stream.flush() if self.is_valid: self.fileobj.write(self.replace_colors(data)) finally: wlock.release() def fileno(self): return self.stream.fileno() def flush(self): self.stream.flush() if self.is_valid: self.fileobj.flush() def isatty(self): return self.stream.isatty() def init(ctx): global LOGFILE filename = os.path.abspath(LOGFILE) try: os.makedirs(os.path.dirname(os.path.abspath(filename))) except OSError: pass if hasattr(os, 'O_NOINHERIT'): fd = os.open(LOGFILE, os.O_CREAT | os.O_TRUNC | os.O_WRONLY | os.O_NOINHERIT) fileobj = os.fdopen(fd, 'w') else: fileobj = open(LOGFILE, 'w') old_stderr = sys.stderr # sys.stdout has already been replaced, so __stdout__ will be faster #sys.stdout = log_to_file(sys.stdout, fileobj, filename) #sys.stderr = log_to_file(sys.stderr, fileobj, filename) def wrap(stream): if stream.isatty(): return ansiterm.AnsiTerm(stream) return stream sys.stdout = log_to_file(wrap(sys.__stdout__), fileobj, filename) sys.stderr = log_to_file(wrap(sys.__stderr__), fileobj, filename) # now mess with the logging module... for x in Logs.log.handlers: try: stream = x.stream except AttributeError: pass else: if id(stream) == id(old_stderr): x.stream = sys.stderr def exit_cleanup(): try: fileobj = sys.stdout.fileobj except AttributeError: pass else: sys.stdout.is_valid = False sys.stderr.is_valid = False fileobj.close() filename = sys.stdout.filename Logs.info('Output logged to %r', filename) # then copy the log file to "latest.log" if possible up = os.path.dirname(os.path.abspath(filename)) try: shutil.copy(filename, os.path.join(up, 'latest.log')) except OSError: # this may fail on windows due to processes spawned pass atexit.register(exit_cleanup) tdb-1.4.2/third_party/waf/waflib/extras/buildcopy.py0000660000000000000000000000526013527011455022455 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Calle Rosenquist, 2017 (xbreak) """ Create task that copies source files to the associated build node. This is useful to e.g. construct a complete Python package so it can be unit tested without installation. Source files to be copied can be specified either in `buildcopy_source` attribute, or `source` attribute. If both are specified `buildcopy_source` has priority. Examples:: def build(bld): bld(name = 'bar', features = 'py buildcopy', source = bld.path.ant_glob('src/bar/*.py')) bld(name = 'py baz', features = 'buildcopy', buildcopy_source = bld.path.ant_glob('src/bar/*.py') + ['src/bar/resource.txt']) """ import os, shutil from waflib import Errors, Task, TaskGen, Utils, Node, Logs @TaskGen.before_method('process_source') @TaskGen.feature('buildcopy') def make_buildcopy(self): """ Creates the buildcopy task. """ def to_src_nodes(lst): """Find file nodes only in src, TaskGen.to_nodes will not work for this since it gives preference to nodes in build. 
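Both Node objects and path strings are accepted; strings are resolved
relative to the current wscript directory, e.g. (illustrative path)
buildcopy_source=['src/pkg/mod.py'].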
""" if isinstance(lst, Node.Node): if not lst.is_src(): raise Errors.WafError('buildcopy: node %s is not in src'%lst) if not os.path.isfile(lst.abspath()): raise Errors.WafError('buildcopy: Cannot copy directory %s (unsupported action)'%lst) return lst if isinstance(lst, str): lst = [x for x in Utils.split_path(lst) if x and x != '.'] node = self.bld.path.get_src().search_node(lst) if node: if not os.path.isfile(node.abspath()): raise Errors.WafError('buildcopy: Cannot copy directory %s (unsupported action)'%node) return node node = self.bld.path.get_src().find_node(lst) if node: if not os.path.isfile(node.abspath()): raise Errors.WafError('buildcopy: Cannot copy directory %s (unsupported action)'%node) return node raise Errors.WafError('buildcopy: File not found in src: %s'%os.path.join(*lst)) nodes = [ to_src_nodes(n) for n in getattr(self, 'buildcopy_source', getattr(self, 'source', [])) ] if not nodes: Logs.warn('buildcopy: No source files provided to buildcopy in %s (set `buildcopy_source` or `source`)', self) return node_pairs = [(n, n.get_bld()) for n in nodes] self.create_task('buildcopy', [n[0] for n in node_pairs], [n[1] for n in node_pairs], node_pairs=node_pairs) class buildcopy(Task.Task): """ Copy for each pair `n` in `node_pairs`: n[0] -> n[1]. Attribute `node_pairs` should contain a list of tuples describing source and target: node_pairs = [(in, out), ...] """ color = 'PINK' def keyword(self): return 'Copying' def run(self): for f,t in self.node_pairs: t.parent.mkdir() shutil.copy2(f.abspath(), t.abspath()) tdb-1.4.2/third_party/waf/waflib/extras/c_bgxlc.py0000660000000000000000000000130213444661622022062 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # harald at klimachs.de """ IBM XL Compiler for Blue Gene """ from waflib.Tools import ccroot,ar from waflib.Configure import conf from waflib.Tools import xlc # method xlc_common_flags from waflib.Tools.compiler_c import c_compiler c_compiler['linux'].append('c_bgxlc') @conf def find_bgxlc(conf): cc = conf.find_program(['bgxlc_r','bgxlc'], var='CC') conf.get_xlc_version(cc) conf.env.CC = cc conf.env.CC_NAME = 'bgxlc' def configure(conf): conf.find_bgxlc() conf.find_ar() conf.xlc_common_flags() conf.env.LINKFLAGS_cshlib = ['-G','-Wl,-bexpfull'] conf.env.LINKFLAGS_cprogram = [] conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() tdb-1.4.2/third_party/waf/waflib/extras/c_dumbpreproc.py0000660000000000000000000000316213444661622023313 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2010 (ita) """ Dumb C/C++ preprocessor for finding dependencies It will look at all include files it can find after removing the comments, so the following will always add the dependency on both "a.h" and "b.h":: #include "a.h" #ifdef B #include "b.h" #endif int main() { return 0; } To use:: def configure(conf): conf.load('compiler_c') conf.load('c_dumbpreproc') """ import re from waflib.Tools import c_preproc re_inc = re.compile( '^[ \t]*(#|%:)[ \t]*(include)[ \t]*[<"](.*)[>"]\r*$', re.IGNORECASE | re.MULTILINE) def lines_includes(node): code = node.read() if c_preproc.use_trigraphs: for (a, b) in c_preproc.trig_def: code = code.split(a).join(b) code = c_preproc.re_nl.sub('', code) code = c_preproc.re_cpp.sub(c_preproc.repl, code) return [(m.group(2), m.group(3)) for m in re.finditer(re_inc, code)] parser = c_preproc.c_parser class dumb_parser(parser): def addlines(self, node): if node in self.nodes[:-1]: return self.currentnode_stack.append(node.parent) # Avoid reading the 
same files again try: lines = self.parse_cache[node] except KeyError: lines = self.parse_cache[node] = lines_includes(node) self.lines = lines + [(c_preproc.POPFILE, '')] + self.lines def start(self, node, env): try: self.parse_cache = node.ctx.parse_cache except AttributeError: self.parse_cache = node.ctx.parse_cache = {} self.addlines(node) while self.lines: (x, y) = self.lines.pop(0) if x == c_preproc.POPFILE: self.currentnode_stack.pop() continue self.tryfind(y) c_preproc.c_parser = dumb_parser tdb-1.4.2/third_party/waf/waflib/extras/c_emscripten.py0000660000000000000000000000474013444661622023145 0ustar rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 vi:ts=4:noexpandtab import subprocess, shlex, sys from waflib.Tools import ccroot, gcc, gxx from waflib.Configure import conf from waflib.TaskGen import after_method, feature from waflib.Tools.compiler_c import c_compiler from waflib.Tools.compiler_cxx import cxx_compiler for supported_os in ('linux', 'darwin', 'gnu', 'aix'): c_compiler[supported_os].append('c_emscripten') cxx_compiler[supported_os].append('c_emscripten') @conf def get_emscripten_version(conf, cc): """ Emscripten doesn't support processing '-' like clang/gcc """ dummy = conf.cachedir.parent.make_node("waf-emscripten.c") dummy.write("") cmd = cc + ['-dM', '-E', '-x', 'c', dummy.abspath()] env = conf.env.env or None try: p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) out = p.communicate()[0] except Exception as e: conf.fatal('Could not determine emscripten version %r: %s' % (cmd, e)) if not isinstance(out, str): out = out.decode(sys.stdout.encoding or 'latin-1') k = {} out = out.splitlines() for line in out: lst = shlex.split(line) if len(lst)>2: key = lst[1] val = lst[2] k[key] = val if not ('__clang__' in k and 'EMSCRIPTEN' in k): conf.fatal('Could not determine the emscripten compiler version.') conf.env.DEST_OS = 'generic' conf.env.DEST_BINFMT = 'elf' conf.env.DEST_CPU = 'asm-js' conf.env.CC_VERSION = (k['__clang_major__'], k['__clang_minor__'], k['__clang_patchlevel__']) return k @conf def find_emscripten(conf): cc = conf.find_program(['emcc'], var='CC') conf.get_emscripten_version(cc) conf.env.CC = cc conf.env.CC_NAME = 'emscripten' cxx = conf.find_program(['em++'], var='CXX') conf.env.CXX = cxx conf.env.CXX_NAME = 'emscripten' conf.find_program(['emar'], var='AR') def configure(conf): conf.find_emscripten() conf.find_ar() conf.gcc_common_flags() conf.gxx_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.cxx_load_tools() conf.cxx_add_flags() conf.link_add_flags() conf.env.ARFLAGS = ['rcs'] conf.env.cshlib_PATTERN = '%s.js' conf.env.cxxshlib_PATTERN = '%s.js' conf.env.cstlib_PATTERN = '%s.a' conf.env.cxxstlib_PATTERN = '%s.a' conf.env.cprogram_PATTERN = '%s.html' conf.env.cxxprogram_PATTERN = '%s.html' conf.env.CXX_TGT_F = ['-c', '-o', ''] conf.env.CC_TGT_F = ['-c', '-o', ''] conf.env.CXXLNK_TGT_F = ['-o', ''] conf.env.CCLNK_TGT_F = ['-o', ''] conf.env.append_value('LINKFLAGS',['-Wl,--enable-auto-import']) tdb-1.4.2/third_party/waf/waflib/extras/c_nec.py0000660000000000000000000000337513444661622021544 0ustar rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # harald at klimachs.de """ NEC SX Compiler for SX vector systems """ import re from waflib import Utils from waflib.Tools import ccroot,ar from waflib.Configure import conf from waflib.Tools import xlc # method xlc_common_flags from waflib.Tools.compiler_c import c_compiler c_compiler['linux'].append('c_nec') @conf def find_sxc(conf): cc = conf.find_program(['sxcc'], var='CC') conf.get_sxc_version(cc) conf.env.CC = cc conf.env.CC_NAME = 'sxcc' @conf def get_sxc_version(conf, fc): version_re = re.compile(r"C\+\+/SX\s*Version\s*(?P<major>\d*)\.(?P<minor>\d*)", re.I).search cmd = fc + ['-V'] p = Utils.subprocess.Popen(cmd, stdin=False, stdout=Utils.subprocess.PIPE, stderr=Utils.subprocess.PIPE, env=None) out, err = p.communicate() if out: match = version_re(out) else: match = version_re(err) if not match: conf.fatal('Could not determine the NEC C compiler version.') k = match.groupdict() conf.env['C_VERSION'] = (k['major'], k['minor']) @conf def sxc_common_flags(conf): v=conf.env v['CC_SRC_F']=[] v['CC_TGT_F']=['-c','-o'] if not v['LINK_CC']: v['LINK_CC']=v['CC'] v['CCLNK_SRC_F']=[] v['CCLNK_TGT_F']=['-o'] v['CPPPATH_ST']='-I%s' v['DEFINES_ST']='-D%s' v['LIB_ST']='-l%s' v['LIBPATH_ST']='-L%s' v['STLIB_ST']='-l%s' v['STLIBPATH_ST']='-L%s' v['RPATH_ST']='' v['SONAME_ST']=[] v['SHLIB_MARKER']=[] v['STLIB_MARKER']=[] v['LINKFLAGS_cprogram']=[''] v['cprogram_PATTERN']='%s' v['CFLAGS_cshlib']=['-fPIC'] v['LINKFLAGS_cshlib']=[''] v['cshlib_PATTERN']='lib%s.so' v['LINKFLAGS_cstlib']=[] v['cstlib_PATTERN']='lib%s.a' def configure(conf): conf.find_sxc() conf.find_program('sxar', var='AR') conf.sxc_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() tdb-1.4.2/third_party/waf/waflib/extras/cabal.py0000660000000000000000000001205513444661622021532 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Anton Feldmann, 2012 # "Base for cabal" from waflib import Task, Utils from waflib.TaskGen import extension from waflib.Utils import threading from shutil import rmtree lock = threading.Lock() registering = False def configure(self): self.find_program('cabal', var='CABAL') self.find_program('ghc-pkg', var='GHCPKG') pkgconfd = self.bldnode.abspath() + '/package.conf.d' self.env.PREFIX = self.bldnode.abspath() + '/dist' self.env.PKGCONFD = pkgconfd if self.root.find_node(pkgconfd + '/package.cache'): self.msg('Using existing package database', pkgconfd, color='CYAN') else: pkgdir = self.root.find_dir(pkgconfd) if pkgdir: self.msg('Deleting corrupt package database', pkgdir.abspath(), color ='RED') rmtree(pkgdir.abspath()) pkgdir = None self.cmd_and_log(self.env.GHCPKG + ['init', pkgconfd]) self.msg('Created package database', pkgconfd, color = 'YELLOW' if pkgdir else 'GREEN') @extension('.cabal') def process_cabal(self, node): out_dir_node = self.bld.root.find_dir(self.bld.out_dir) package_node = node.change_ext('.package') package_node = out_dir_node.find_or_declare(package_node.name) build_node = node.parent.get_bld() build_path = build_node.abspath() config_node = build_node.find_or_declare('setup-config') inplace_node = build_node.find_or_declare('package.conf.inplace') config_task = self.create_task('cabal_configure', node) config_task.cwd = node.parent.abspath() config_task.depends_on = getattr(self, 'depends_on', '') config_task.build_path = build_path config_task.set_outputs(config_node) build_task = self.create_task('cabal_build', config_node) build_task.cwd = node.parent.abspath() build_task.build_path = build_path
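# The tasks created below chain onto this one: cabal_copy publishes the build
# products, the optional cabal_register/ghcpkg_register pair (enabled by the
# 'register' attribute) updates the local package database, and a final
# cabal_touch task produces the .package stamp that other cabal units
# reference through 'depends_on' (see cabal_configure.scan).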
build_task.set_outputs(inplace_node) copy_task = self.create_task('cabal_copy', inplace_node) copy_task.cwd = node.parent.abspath() copy_task.depends_on = getattr(self, 'depends_on', '') copy_task.build_path = build_path last_task = copy_task task_list = [config_task, build_task, copy_task] if (getattr(self, 'register', False)): register_task = self.create_task('cabal_register', inplace_node) register_task.cwd = node.parent.abspath() register_task.set_run_after(copy_task) register_task.build_path = build_path pkgreg_task = self.create_task('ghcpkg_register', inplace_node) pkgreg_task.cwd = node.parent.abspath() pkgreg_task.set_run_after(register_task) pkgreg_task.build_path = build_path last_task = pkgreg_task task_list += [register_task, pkgreg_task] touch_task = self.create_task('cabal_touch', inplace_node) touch_task.set_run_after(last_task) touch_task.set_outputs(package_node) touch_task.build_path = build_path task_list += [touch_task] return task_list def get_all_src_deps(node): hs_deps = node.ant_glob('**/*.hs') hsc_deps = node.ant_glob('**/*.hsc') lhs_deps = node.ant_glob('**/*.lhs') c_deps = node.ant_glob('**/*.c') cpp_deps = node.ant_glob('**/*.cpp') proto_deps = node.ant_glob('**/*.proto') return sum([hs_deps, hsc_deps, lhs_deps, c_deps, cpp_deps, proto_deps], []) class Cabal(Task.Task): def scan(self): return (get_all_src_deps(self.generator.path), ()) class cabal_configure(Cabal): run_str = '${CABAL} configure -v0 --prefix=${PREFIX} --global --user --package-db=${PKGCONFD} --builddir=${tsk.build_path}' shell = True def scan(self): out_node = self.generator.bld.root.find_dir(self.generator.bld.out_dir) deps = [out_node.find_or_declare(dep).change_ext('.package') for dep in Utils.to_list(self.depends_on)] return (deps, ()) class cabal_build(Cabal): run_str = '${CABAL} build -v1 --builddir=${tsk.build_path}/' shell = True class cabal_copy(Cabal): run_str = '${CABAL} copy -v0 --builddir=${tsk.build_path}' shell = True class cabal_register(Cabal): run_str = '${CABAL} register -v0 --gen-pkg-config=${tsk.build_path}/pkg.config --builddir=${tsk.build_path}' shell = True class ghcpkg_register(Cabal): run_str = '${GHCPKG} update -v0 --global --user --package-conf=${PKGCONFD} ${tsk.build_path}/pkg.config' shell = True def runnable_status(self): global lock, registering val = False lock.acquire() val = registering lock.release() if val: return Task.ASK_LATER ret = Task.Task.runnable_status(self) if ret == Task.RUN_ME: lock.acquire() registering = True lock.release() return ret def post_run(self): global lock, registering lock.acquire() registering = False lock.release() return Task.Task.post_run(self) class cabal_touch(Cabal): run_str = 'touch ${TGT}' tdb-1.4.2/third_party/waf/waflib/extras/cfg_altoptions.py0000660000000000000000000000541113444661622023501 0ustar rootroot00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- # Tool to extend c_config.check_cfg() __author__ = __maintainer__ = "Jérôme Carretero " __copyright__ = "Jérôme Carretero, 2014" """ This tool allows to work around the absence of ``*-config`` programs on systems, by keeping the same clean configuration syntax but inferring values or permitting their modification via the options interface. Note that pkg-config can also support setting ``PKG_CONFIG_PATH``, so you can put custom files in a folder containing new .pc files. This tool could also be implemented by taking advantage of this fact. 
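For instance (path purely illustrative), extra .pc files can be picked up
without any wscript changes:

	$ PKG_CONFIG_PATH=/opt/mypkg/lib/pkgconfig ./waf configure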
Usage:: def options(opt): opt.load('c_config_alt') opt.add_package_option('package') def configure(cfg): conf.load('c_config_alt') conf.check_cfg(...) Known issues: - Behavior with different build contexts... """ import os import functools from waflib import Configure, Options, Errors def name_to_dest(x): return x.lower().replace('-', '_') def options(opt): def x(opt, param): dest = name_to_dest(param) gr = opt.get_option_group("configure options") gr.add_option('--%s-root' % dest, help="path containing include and lib subfolders for %s" \ % param, ) opt.add_package_option = functools.partial(x, opt) check_cfg_old = getattr(Configure.ConfigurationContext, 'check_cfg') @Configure.conf def check_cfg(conf, *k, **kw): if k: lst = k[0].split() kw['package'] = lst[0] kw['args'] = ' '.join(lst[1:]) if not 'package' in kw: return check_cfg_old(conf, **kw) package = kw['package'] package_lo = name_to_dest(package) package_hi = package.upper().replace('-', '_') # TODO FIXME package_hi = kw.get('uselib_store', package_hi) def check_folder(path, name): try: assert os.path.isdir(path) except AssertionError: raise Errors.ConfigurationError( "%s_%s (%s) is not a folder!" \ % (package_lo, name, path)) return path root = getattr(Options.options, '%s_root' % package_lo, None) if root is None: return check_cfg_old(conf, **kw) else: def add_manual_var(k, v): conf.start_msg('Adding for %s a manual var' % (package)) conf.env["%s_%s" % (k, package_hi)] = v conf.end_msg("%s = %s" % (k, v)) check_folder(root, 'root') pkg_inc = check_folder(os.path.join(root, "include"), 'inc') add_manual_var('INCLUDES', [pkg_inc]) pkg_lib = check_folder(os.path.join(root, "lib"), 'libpath') add_manual_var('LIBPATH', [pkg_lib]) add_manual_var('LIB', [package]) for x in kw.get('manual_deps', []): for k, v in sorted(conf.env.get_merged_dict().items()): if k.endswith('_%s' % x): k = k.replace('_%s' % x, '') conf.start_msg('Adding for %s a manual dep' \ %(package)) conf.env["%s_%s" % (k, package_hi)] += v conf.end_msg('%s += %s' % (k, v)) return True tdb-1.4.2/third_party/waf/waflib/extras/clang_compilation_database.py0000660000000000000000000000511213444661622025772 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Christoph Koke, 2013 """ Writes the c and cpp compile commands into build/compile_commands.json see http://clang.llvm.org/docs/JSONCompilationDatabase.html Usage: def configure(conf): conf.load('compiler_cxx') ... 
conf.load('clang_compilation_database') """ import sys, os, json, shlex, pipes from waflib import Logs, TaskGen, Task Task.Task.keep_last_cmd = True @TaskGen.feature('c', 'cxx') @TaskGen.after_method('process_use') def collect_compilation_db_tasks(self): "Add a compilation database entry for compiled tasks" try: clang_db = self.bld.clang_compilation_database_tasks except AttributeError: clang_db = self.bld.clang_compilation_database_tasks = [] self.bld.add_post_fun(write_compilation_database) tup = tuple(y for y in [Task.classes.get(x) for x in ('c', 'cxx')] if y) for task in getattr(self, 'compiled_tasks', []): if isinstance(task, tup): clang_db.append(task) def write_compilation_database(ctx): "Write the clang compilation database as JSON" database_file = ctx.bldnode.make_node('compile_commands.json') Logs.info('Build commands will be stored in %s', database_file.path_from(ctx.path)) try: root = json.load(database_file) except IOError: root = [] clang_db = dict((x['file'], x) for x in root) for task in getattr(ctx, 'clang_compilation_database_tasks', []): try: cmd = task.last_cmd except AttributeError: continue directory = getattr(task, 'cwd', ctx.variant_dir) f_node = task.inputs[0] filename = os.path.relpath(f_node.abspath(), directory) entry = { "directory": directory, "arguments": cmd, "file": filename, } clang_db[filename] = entry root = list(clang_db.values()) database_file.write(json.dumps(root, indent=2)) # Override the runnable_status function to do a dummy/dry run when the file doesn't need to be compiled. # This will make sure compile_commands.json is always fully up to date. # Previously you could end up with a partial compile_commands.json if the build failed. for x in ('c', 'cxx'): if x not in Task.classes: continue t = Task.classes[x] def runnable_status(self): def exec_command(cmd, **kw): pass run_status = self.old_runnable_status() if run_status == Task.SKIP_ME: setattr(self, 'old_exec_command', getattr(self, 'exec_command', None)) setattr(self, 'exec_command', exec_command) self.run() setattr(self, 'exec_command', getattr(self, 'old_exec_command', None)) return run_status setattr(t, 'old_runnable_status', getattr(t, 'runnable_status', None)) setattr(t, 'runnable_status', runnable_status) tdb-1.4.2/third_party/waf/waflib/extras/clang_cross.py0000660000000000000000000000476613527011455022772 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Krzysztof Kosiński 2014 # DragoonX6 2018 """ Detect the Clang C compiler This version is an attempt at supporting the -target and -sysroot flag of Clang. 
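Example configure invocation (the triple and sysroot values are only
illustrative):

	$ waf configure --clang-target-triple=aarch64-linux-gnu --clang-sysroot=/opt/sysroots/aarch64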
""" from waflib.Tools import ccroot, ar, gcc from waflib.Configure import conf import waflib.Context import waflib.extras.clang_cross_common def options(opt): """ Target triplet for clang:: $ waf configure --clang-target-triple=x86_64-pc-linux-gnu """ cc_compiler_opts = opt.add_option_group('Configuration options') cc_compiler_opts.add_option('--clang-target-triple', default=None, help='Target triple for clang', dest='clang_target_triple') cc_compiler_opts.add_option('--clang-sysroot', default=None, help='Sysroot for clang', dest='clang_sysroot') @conf def find_clang(conf): """ Finds the program clang and executes it to ensure it really is clang """ import os cc = conf.find_program('clang', var='CC') if conf.options.clang_target_triple != None: conf.env.append_value('CC', ['-target', conf.options.clang_target_triple]) if conf.options.clang_sysroot != None: sysroot = str() if os.path.isabs(conf.options.clang_sysroot): sysroot = conf.options.clang_sysroot else: sysroot = os.path.normpath(os.path.join(os.getcwd(), conf.options.clang_sysroot)) conf.env.append_value('CC', ['--sysroot', sysroot]) conf.get_cc_version(cc, clang=True) conf.env.CC_NAME = 'clang' @conf def clang_modifier_x86_64_w64_mingw32(conf): conf.gcc_modifier_win32() @conf def clang_modifier_i386_w64_mingw32(conf): conf.gcc_modifier_win32() @conf def clang_modifier_x86_64_windows_msvc(conf): conf.clang_modifier_msvc() # Allow the user to override any flags if they so desire. clang_modifier_user_func = getattr(conf, 'clang_modifier_x86_64_windows_msvc_user', None) if clang_modifier_user_func: clang_modifier_user_func() @conf def clang_modifier_i386_windows_msvc(conf): conf.clang_modifier_msvc() # Allow the user to override any flags if they so desire. clang_modifier_user_func = getattr(conf, 'clang_modifier_i386_windows_msvc_user', None) if clang_modifier_user_func: clang_modifier_user_func() def configure(conf): conf.find_clang() conf.find_program(['llvm-ar', 'ar'], var='AR') conf.find_ar() conf.gcc_common_flags() # Allow the user to provide flags for the target platform. conf.gcc_modifier_platform() # And allow more fine grained control based on the compiler's triplet. conf.clang_modifier_target_triple() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() tdb-1.4.2/third_party/waf/waflib/extras/clang_cross_common.py0000660000000000000000000000654213527011455024334 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # DragoonX6 2018 """ Common routines for cross_clang.py and cross_clangxx.py """ from waflib.Configure import conf import waflib.Context def normalize_target_triple(target_triple): target_triple = target_triple[:-1] normalized_triple = target_triple.replace('--', '-unknown-') if normalized_triple.startswith('-'): normalized_triple = 'unknown' + normalized_triple if normalized_triple.endswith('-'): normalized_triple += 'unknown' # Normalize MinGW builds to *arch*-w64-mingw32 if normalized_triple.endswith('windows-gnu'): normalized_triple = normalized_triple[:normalized_triple.index('-')] + '-w64-mingw32' # Strip the vendor when doing msvc builds, since it's unused anyway. if normalized_triple.endswith('windows-msvc'): normalized_triple = normalized_triple[:normalized_triple.index('-')] + '-windows-msvc' return normalized_triple.replace('-', '_') @conf def clang_modifier_msvc(conf): import os """ Really basic setup to use clang in msvc mode. We actually don't really want to do a lot, even though clang is msvc compatible in this mode, that doesn't mean we're actually using msvc. 
It's probably the best to leave it to the user, we can assume msvc mode if the user uses the clang-cl frontend, but this module only concerns itself with the gcc-like frontend. """ v = conf.env v.cprogram_PATTERN = '%s.exe' v.cshlib_PATTERN = '%s.dll' v.implib_PATTERN = '%s.lib' v.IMPLIB_ST = '-Wl,-IMPLIB:%s' v.SHLIB_MARKER = [] v.CFLAGS_cshlib = [] v.LINKFLAGS_cshlib = ['-Wl,-DLL'] v.cstlib_PATTERN = '%s.lib' v.STLIB_MARKER = [] del(v.AR) conf.find_program(['llvm-lib', 'lib'], var='AR') v.ARFLAGS = ['-nologo'] v.AR_TGT_F = ['-out:'] # Default to the linker supplied with llvm instead of link.exe or ld v.LINK_CC = v.CC + ['-fuse-ld=lld', '-nostdlib'] v.CCLNK_TGT_F = ['-o'] v.def_PATTERN = '-Wl,-def:%s' v.LINKFLAGS = [] v.LIB_ST = '-l%s' v.LIBPATH_ST = '-Wl,-LIBPATH:%s' v.STLIB_ST = '-l%s' v.STLIBPATH_ST = '-Wl,-LIBPATH:%s' CFLAGS_CRT_COMMON = [ '-Xclang', '--dependent-lib=oldnames', '-Xclang', '-fno-rtti-data', '-D_MT' ] v.CFLAGS_CRT_MULTITHREADED = CFLAGS_CRT_COMMON + [ '-Xclang', '-flto-visibility-public-std', '-Xclang', '--dependent-lib=libcmt', ] v.CXXFLAGS_CRT_MULTITHREADED = v.CFLAGS_CRT_MULTITHREADED v.CFLAGS_CRT_MULTITHREADED_DBG = CFLAGS_CRT_COMMON + [ '-D_DEBUG', '-Xclang', '-flto-visibility-public-std', '-Xclang', '--dependent-lib=libcmtd', ] v.CXXFLAGS_CRT_MULTITHREADED_DBG = v.CFLAGS_CRT_MULTITHREADED_DBG v.CFLAGS_CRT_MULTITHREADED_DLL = CFLAGS_CRT_COMMON + [ '-D_DLL', '-Xclang', '--dependent-lib=msvcrt' ] v.CXXFLAGS_CRT_MULTITHREADED_DLL = v.CFLAGS_CRT_MULTITHREADED_DLL v.CFLAGS_CRT_MULTITHREADED_DLL_DBG = CFLAGS_CRT_COMMON + [ '-D_DLL', '-D_DEBUG', '-Xclang', '--dependent-lib=msvcrtd', ] v.CXXFLAGS_CRT_MULTITHREADED_DLL_DBG = v.CFLAGS_CRT_MULTITHREADED_DLL_DBG @conf def clang_modifier_target_triple(conf, cpp=False): compiler = conf.env.CXX if cpp else conf.env.CC output = conf.cmd_and_log(compiler + ['-dumpmachine'], output=waflib.Context.STDOUT) modifier = ('clangxx' if cpp else 'clang') + '_modifier_' clang_modifier_func = getattr(conf, modifier + normalize_target_triple(output), None) if clang_modifier_func: clang_modifier_func() tdb-1.4.2/third_party/waf/waflib/extras/clangxx_cross.py0000660000000000000000000000571213527011455023342 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy 2009-2018 (ita) # DragoonX6 2018 """ Detect the Clang++ C++ compiler This version is an attempt at supporting the -target and -sysroot flag of Clang++. 
""" from waflib.Tools import ccroot, ar, gxx from waflib.Configure import conf import waflib.extras.clang_cross_common def options(opt): """ Target triplet for clang++:: $ waf configure --clangxx-target-triple=x86_64-pc-linux-gnu """ cxx_compiler_opts = opt.add_option_group('Configuration options') cxx_compiler_opts.add_option('--clangxx-target-triple', default=None, help='Target triple for clang++', dest='clangxx_target_triple') cxx_compiler_opts.add_option('--clangxx-sysroot', default=None, help='Sysroot for clang++', dest='clangxx_sysroot') @conf def find_clangxx(conf): """ Finds the program clang++, and executes it to ensure it really is clang++ """ import os cxx = conf.find_program('clang++', var='CXX') if conf.options.clangxx_target_triple != None: conf.env.append_value('CXX', ['-target', conf.options.clangxx_target_triple]) if conf.options.clangxx_sysroot != None: sysroot = str() if os.path.isabs(conf.options.clangxx_sysroot): sysroot = conf.options.clangxx_sysroot else: sysroot = os.path.normpath(os.path.join(os.getcwd(), conf.options.clangxx_sysroot)) conf.env.append_value('CXX', ['--sysroot', sysroot]) conf.get_cc_version(cxx, clang=True) conf.env.CXX_NAME = 'clang' @conf def clangxx_modifier_x86_64_w64_mingw32(conf): conf.gcc_modifier_win32() @conf def clangxx_modifier_i386_w64_mingw32(conf): conf.gcc_modifier_win32() @conf def clangxx_modifier_msvc(conf): v = conf.env v.cxxprogram_PATTERN = v.cprogram_PATTERN v.cxxshlib_PATTERN = v.cshlib_PATTERN v.CXXFLAGS_cxxshlib = [] v.LINKFLAGS_cxxshlib = v.LINKFLAGS_cshlib v.cxxstlib_PATTERN = v.cstlib_PATTERN v.LINK_CXX = v.CXX + ['-fuse-ld=lld', '-nostdlib'] v.CXXLNK_TGT_F = v.CCLNK_TGT_F @conf def clangxx_modifier_x86_64_windows_msvc(conf): conf.clang_modifier_msvc() conf.clangxx_modifier_msvc() # Allow the user to override any flags if they so desire. clang_modifier_user_func = getattr(conf, 'clangxx_modifier_x86_64_windows_msvc_user', None) if clang_modifier_user_func: clang_modifier_user_func() @conf def clangxx_modifier_i386_windows_msvc(conf): conf.clang_modifier_msvc() conf.clangxx_modifier_msvc() # Allow the user to override any flags if they so desire. clang_modifier_user_func = getattr(conf, 'clangxx_modifier_i386_windows_msvc_user', None) if clang_modifier_user_func: clang_modifier_user_func() def configure(conf): conf.find_clangxx() conf.find_program(['llvm-ar', 'ar'], var='AR') conf.find_ar() conf.gxx_common_flags() # Allow the user to provide flags for the target platform. conf.gxx_modifier_platform() # And allow more fine grained control based on the compiler's triplet. conf.clang_modifier_target_triple(cpp=True) conf.cxx_load_tools() conf.cxx_add_flags() conf.link_add_flags() tdb-1.4.2/third_party/waf/waflib/extras/codelite.py0000660000000000000000000010214013444661622022253 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # CodeLite Project # Christian Klein (chrikle@berlios.de) # Created: Jan 2012 # As templete for this file I used the msvs.py # I hope this template will work proper """ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. 
The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ """ To add this tool to your project: def options(conf): opt.load('codelite') It can be a good idea to add the sync_exec tool too. To generate solution files: $ waf configure codelite To customize the outputs, provide subclasses in your wscript files: from waflib.extras import codelite class vsnode_target(codelite.vsnode_target): def get_build_command(self, props): # likely to be required return "waf.bat build" def collect_source(self): # likely to be required ... class codelite_bar(codelite.codelite_generator): def init(self): codelite.codelite_generator.init(self) self.vsnode_target = vsnode_target The codelite class re-uses the same build() function for reading the targets (task generators), you may therefore specify codelite settings on the context object: def build(bld): bld.codelite_solution_name = 'foo.workspace' bld.waf_command = 'waf.bat' bld.projects_dir = bld.srcnode.make_node('') bld.projects_dir.mkdir() ASSUMPTIONS: * a project can be either a directory or a target, project files are written only for targets that have source files * each project is a vcxproj file, therefore the project uuid needs only to be a hash of the absolute path """ import os, re, sys import uuid # requires python 2.5 from waflib.Build import BuildContext from waflib import Utils, TaskGen, Logs, Task, Context, Node, Options HEADERS_GLOB = '**/(*.h|*.hpp|*.H|*.inl)' PROJECT_TEMPLATE = r''' ${for x in project.source} ${if (project.get_key(x)=="sourcefile")} ${endif} ${endfor} ${for x in project.source} ${if (project.get_key(x)=="headerfile")} ${endif} ${endfor} $b = project.build_properties[0]} ${xml:project.get_rebuild_command(project.build_properties[0])} ${xml:project.get_clean_command(project.build_properties[0])} ${xml:project.get_build_command(project.build_properties[0])} ${xml:project.get_install_command(project.build_properties[0])} ${xml:project.get_build_and_install_command(project.build_properties[0])} ${xml:project.get_build_all_command(project.build_properties[0])} ${xml:project.get_rebuild_all_command(project.build_properties[0])} ${xml:project.get_clean_all_command(project.build_properties[0])} ${xml:project.get_build_and_install_all_command(project.build_properties[0])} None ''' SOLUTION_TEMPLATE = ''' ${for p in project.all_projects} ${endfor} ${for p in project.all_projects} ${endfor} ''' COMPILE_TEMPLATE = '''def f(project): lst = [] def xml_escape(value): return value.replace("&", "&").replace('"', """).replace("'", "'").replace("<", "<").replace(">", ">") %s #f = open('cmd.txt', 'w') #f.write(str(lst)) #f.close() return ''.join(lst) ''' reg_act = re.compile(r"(?P\\)|(?P\$\$)|(?P\$\{(?P[^}]*?)\})", re.M) def compile_template(line): """ Compile a template 
expression into a python function (like jsps, but way shorter) """ extr = [] def repl(match): g = match.group if g('dollar'): return "$" elif g('backslash'): return "\\" elif g('subst'): extr.append(g('code')) return "<<|@|>>" return None line2 = reg_act.sub(repl, line) params = line2.split('<<|@|>>') assert(extr) indent = 0 buf = [] app = buf.append def app(txt): buf.append(indent * '\t' + txt) for x in range(len(extr)): if params[x]: app("lst.append(%r)" % params[x]) f = extr[x] if f.startswith(('if', 'for')): app(f + ':') indent += 1 elif f.startswith('py:'): app(f[3:]) elif f.startswith(('endif', 'endfor')): indent -= 1 elif f.startswith(('else', 'elif')): indent -= 1 app(f + ':') indent += 1 elif f.startswith('xml:'): app('lst.append(xml_escape(%s))' % f[4:]) else: #app('lst.append((%s) or "cannot find %s")' % (f, f)) app('lst.append(%s)' % f) if extr: if params[-1]: app("lst.append(%r)" % params[-1]) fun = COMPILE_TEMPLATE % "\n\t".join(buf) #print(fun) return Task.funex(fun) re_blank = re.compile('(\n|\r|\\s)*\n', re.M) def rm_blank_lines(txt): txt = re_blank.sub('\r\n', txt) return txt BOM = '\xef\xbb\xbf' try: BOM = bytes(BOM, 'latin-1') # python 3 except (TypeError, NameError): pass def stealth_write(self, data, flags='wb'): try: unicode except NameError: data = data.encode('utf-8') # python 3 else: data = data.decode(sys.getfilesystemencoding(), 'replace') data = data.encode('utf-8') if self.name.endswith('.project'): data = BOM + data try: txt = self.read(flags='rb') if txt != data: raise ValueError('must write') except (IOError, ValueError): self.write(data, flags=flags) else: Logs.debug('codelite: skipping %r', self) Node.Node.stealth_write = stealth_write re_quote = re.compile("[^a-zA-Z0-9-]") def quote(s): return re_quote.sub("_", s) def xml_escape(value): return value.replace("&", "&").replace('"', """).replace("'", "'").replace("<", "<").replace(">", ">") def make_uuid(v, prefix = None): """ simple utility function """ if isinstance(v, dict): keys = list(v.keys()) keys.sort() tmp = str([(k, v[k]) for k in keys]) else: tmp = str(v) d = Utils.md5(tmp.encode()).hexdigest().upper() if prefix: d = '%s%s' % (prefix, d[8:]) gid = uuid.UUID(d, version = 4) return str(gid).upper() def diff(node, fromnode): # difference between two nodes, but with "(..)" instead of ".." c1 = node c2 = fromnode c1h = c1.height() c2h = c2.height() lst = [] up = 0 while c1h > c2h: lst.append(c1.name) c1 = c1.parent c1h -= 1 while c2h > c1h: up += 1 c2 = c2.parent c2h -= 1 while id(c1) != id(c2): lst.append(c1.name) up += 1 c1 = c1.parent c2 = c2.parent for i in range(up): lst.append('(..)') lst.reverse() return tuple(lst) class build_property(object): pass class vsnode(object): """ Abstract class representing visual studio elements We assume that all visual studio nodes have a uuid and a parent """ def __init__(self, ctx): self.ctx = ctx # codelite context self.name = '' # string, mandatory self.vspath = '' # path in visual studio (name for dirs, absolute path for projects) self.uuid = '' # string, mandatory self.parent = None # parent node for visual studio nesting def get_waf(self): """ Override in subclasses... 
""" return '%s/%s' % (self.ctx.srcnode.abspath(), getattr(self.ctx, 'waf_command', 'waf')) def ptype(self): """ Return a special uuid for projects written in the solution file """ pass def write(self): """ Write the project file, by default, do nothing """ pass def make_uuid(self, val): """ Alias for creating uuid values easily (the templates cannot access global variables) """ return make_uuid(val) class vsnode_vsdir(vsnode): """ Nodes representing visual studio folders (which do not match the filesystem tree!) """ VS_GUID_SOLUTIONFOLDER = "2150E333-8FDC-42A3-9474-1A3956D46DE8" def __init__(self, ctx, uuid, name, vspath=''): vsnode.__init__(self, ctx) self.title = self.name = name self.uuid = uuid self.vspath = vspath or name def ptype(self): return self.VS_GUID_SOLUTIONFOLDER class vsnode_project(vsnode): """ Abstract class representing visual studio project elements A project is assumed to be writable, and has a node representing the file to write to """ VS_GUID_VCPROJ = "8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942" def ptype(self): return self.VS_GUID_VCPROJ def __init__(self, ctx, node): vsnode.__init__(self, ctx) self.path = node self.uuid = make_uuid(node.abspath()) self.name = node.name self.title = self.path.abspath() self.source = [] # list of node objects self.build_properties = [] # list of properties (nmake commands, output dir, etc) def dirs(self): """ Get the list of parent folders of the source files (header files included) for writing the filters """ lst = [] def add(x): if x.height() > self.tg.path.height() and x not in lst: lst.append(x) add(x.parent) for x in self.source: add(x.parent) return lst def write(self): Logs.debug('codelite: creating %r', self.path) #print "self.name:",self.name # first write the project file template1 = compile_template(PROJECT_TEMPLATE) proj_str = template1(self) proj_str = rm_blank_lines(proj_str) self.path.stealth_write(proj_str) # then write the filter #template2 = compile_template(FILTER_TEMPLATE) #filter_str = template2(self) #filter_str = rm_blank_lines(filter_str) #tmp = self.path.parent.make_node(self.path.name + '.filters') #tmp.stealth_write(filter_str) def get_key(self, node): """ required for writing the source files """ name = node.name if name.endswith(('.cpp', '.c')): return 'sourcefile' return 'headerfile' def collect_properties(self): """ Returns a list of triplet (configuration, platform, output_directory) """ ret = [] for c in self.ctx.configurations: for p in self.ctx.platforms: x = build_property() x.outdir = '' x.configuration = c x.platform = p x.preprocessor_definitions = '' x.includes_search_path = '' # can specify "deploy_dir" too ret.append(x) self.build_properties = ret def get_build_params(self, props): opt = '' return (self.get_waf(), opt) def get_build_command(self, props): return "%s build %s" % self.get_build_params(props) def get_clean_command(self, props): return "%s clean %s" % self.get_build_params(props) def get_rebuild_command(self, props): return "%s clean build %s" % self.get_build_params(props) def get_install_command(self, props): return "%s install %s" % self.get_build_params(props) def get_build_and_install_command(self, props): return "%s build install %s" % self.get_build_params(props) def get_build_and_install_all_command(self, props): return "%s build install" % self.get_build_params(props)[0] def get_clean_all_command(self, props): return "%s clean" % self.get_build_params(props)[0] def get_build_all_command(self, props): return "%s build" % self.get_build_params(props)[0] def 
get_rebuild_all_command(self, props): return "%s clean build" % self.get_build_params(props)[0] def get_filter_name(self, node): lst = diff(node, self.tg.path) return '\\'.join(lst) or '.' class vsnode_alias(vsnode_project): def __init__(self, ctx, node, name): vsnode_project.__init__(self, ctx, node) self.name = name self.output_file = '' class vsnode_build_all(vsnode_alias): """ Fake target used to emulate the behaviour of "make all" (starting one process by target is slow) This is the only alias enabled by default """ def __init__(self, ctx, node, name='build_all_projects'): vsnode_alias.__init__(self, ctx, node, name) self.is_active = True class vsnode_install_all(vsnode_alias): """ Fake target used to emulate the behaviour of "make install" """ def __init__(self, ctx, node, name='install_all_projects'): vsnode_alias.__init__(self, ctx, node, name) def get_build_command(self, props): return "%s build install %s" % self.get_build_params(props) def get_clean_command(self, props): return "%s clean %s" % self.get_build_params(props) def get_rebuild_command(self, props): return "%s clean build install %s" % self.get_build_params(props) class vsnode_project_view(vsnode_alias): """ Fake target used to emulate a file system view """ def __init__(self, ctx, node, name='project_view'): vsnode_alias.__init__(self, ctx, node, name) self.tg = self.ctx() # fake one, cannot remove self.exclude_files = Node.exclude_regs + ''' waf-2* waf3-2*/** .waf-2* .waf3-2*/** **/*.sdf **/*.suo **/*.ncb **/%s ''' % Options.lockfile def collect_source(self): # this is likely to be slow self.source = self.ctx.srcnode.ant_glob('**', excl=self.exclude_files) def get_build_command(self, props): params = self.get_build_params(props) + (self.ctx.cmd,) return "%s %s %s" % params def get_clean_command(self, props): return "" def get_rebuild_command(self, props): return self.get_build_command(props) class vsnode_target(vsnode_project): """ CodeLite project representing a targets (programs, libraries, etc) and bound to a task generator """ def __init__(self, ctx, tg): """ A project is more or less equivalent to a file/folder """ base = getattr(ctx, 'projects_dir', None) or tg.path node = base.make_node(quote(tg.name) + ctx.project_extension) # the project file as a Node vsnode_project.__init__(self, ctx, node) self.name = quote(tg.name) self.tg = tg # task generator def get_build_params(self, props): """ Override the default to add the target name """ opt = '' if getattr(self, 'tg', None): opt += " --targets=%s" % self.tg.name return (self.get_waf(), opt) def collect_source(self): tg = self.tg source_files = tg.to_nodes(getattr(tg, 'source', [])) include_dirs = Utils.to_list(getattr(tg, 'codelite_includes', [])) include_files = [] for x in include_dirs: if isinstance(x, str): x = tg.path.find_node(x) if x: lst = [y for y in x.ant_glob(HEADERS_GLOB, flat=False)] include_files.extend(lst) # remove duplicates self.source.extend(list(set(source_files + include_files))) self.source.sort(key=lambda x: x.abspath()) def collect_properties(self): """ CodeLite projects are associated with platforms and configurations (for building especially) """ super(vsnode_target, self).collect_properties() for x in self.build_properties: x.outdir = self.path.parent.abspath() x.preprocessor_definitions = '' x.includes_search_path = '' try: tsk = self.tg.link_task except AttributeError: pass else: x.output_file = tsk.outputs[0].abspath() x.preprocessor_definitions = ';'.join(tsk.env.DEFINES) x.includes_search_path = ';'.join(self.tg.env.INCPATHS) 
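
# Illustrative note (not part of the original module): the helpers above are
# deterministic, which keeps regenerated workspaces stable. For example
# (hypothetical values):
#
#   quote('my-target+v2')          # -> 'my-target_v2' (chars outside [a-zA-Z0-9-] become '_')
#   make_uuid('/src/foo.project')  # -> the same UUID string on every run for the same path
#
# so re-running 'waf codelite' rewrites a project file only when its content
# actually changes (see Node.stealth_write above).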
class codelite_generator(BuildContext): '''generates a CodeLite workspace''' cmd = 'codelite' fun = 'build' def init(self): """ Some data that needs to be present """ if not getattr(self, 'configurations', None): self.configurations = ['Release'] # LocalRelease, RemoteDebug, etc if not getattr(self, 'platforms', None): self.platforms = ['Win32'] if not getattr(self, 'all_projects', None): self.all_projects = [] if not getattr(self, 'project_extension', None): self.project_extension = '.project' if not getattr(self, 'projects_dir', None): self.projects_dir = self.srcnode.make_node('') self.projects_dir.mkdir() # bind the classes to the object, so that subclass can provide custom generators if not getattr(self, 'vsnode_vsdir', None): self.vsnode_vsdir = vsnode_vsdir if not getattr(self, 'vsnode_target', None): self.vsnode_target = vsnode_target if not getattr(self, 'vsnode_build_all', None): self.vsnode_build_all = vsnode_build_all if not getattr(self, 'vsnode_install_all', None): self.vsnode_install_all = vsnode_install_all if not getattr(self, 'vsnode_project_view', None): self.vsnode_project_view = vsnode_project_view self.numver = '11.00' self.vsver = '2010' def execute(self): """ Entry point """ self.restore() if not self.all_envs: self.load_envs() self.recurse([self.run_dir]) # user initialization self.init() # two phases for creating the solution self.collect_projects() # add project objects into "self.all_projects" self.write_files() # write the corresponding project and solution files def collect_projects(self): """ Fill the list self.all_projects with project objects Fill the list of build targets """ self.collect_targets() #self.add_aliases() #self.collect_dirs() default_project = getattr(self, 'default_project', None) def sortfun(x): if x.name == default_project: return '' return getattr(x, 'path', None) and x.path.abspath() or x.name self.all_projects.sort(key=sortfun) def write_files(self): """ Write the project and solution files from the data collected so far. 
It is unlikely that you will want to change this """ for p in self.all_projects: p.write() # and finally write the solution file node = self.get_solution_node() node.parent.mkdir() Logs.warn('Creating %r', node) #a = dir(self.root) #for b in a: # print b #print self.group_names #print "Hallo2: ",self.root.listdir() #print getattr(self, 'codelite_solution_name', None) template1 = compile_template(SOLUTION_TEMPLATE) sln_str = template1(self) sln_str = rm_blank_lines(sln_str) node.stealth_write(sln_str) def get_solution_node(self): """ The solution filename is required when writing the .vcproj files return self.solution_node and if it does not exist, make one """ try: return self.solution_node except: pass codelite_solution_name = getattr(self, 'codelite_solution_name', None) if not codelite_solution_name: codelite_solution_name = getattr(Context.g_module, Context.APPNAME, 'project') + '.workspace' setattr(self, 'codelite_solution_name', codelite_solution_name) if os.path.isabs(codelite_solution_name): self.solution_node = self.root.make_node(codelite_solution_name) else: self.solution_node = self.srcnode.make_node(codelite_solution_name) return self.solution_node def project_configurations(self): """ Helper that returns all the pairs (config,platform) """ ret = [] for c in self.configurations: for p in self.platforms: ret.append((c, p)) return ret def collect_targets(self): """ Process the list of task generators """ for g in self.groups: for tg in g: if not isinstance(tg, TaskGen.task_gen): continue if not hasattr(tg, 'codelite_includes'): tg.codelite_includes = tg.to_list(getattr(tg, 'includes', [])) + tg.to_list(getattr(tg, 'export_includes', [])) tg.post() if not getattr(tg, 'link_task', None): continue p = self.vsnode_target(self, tg) p.collect_source() # delegate this processing p.collect_properties() self.all_projects.append(p) def add_aliases(self): """ Add a specific target that emulates the "make all" necessary for Visual studio when pressing F7 We also add an alias for "make install" (disabled by default) """ base = getattr(self, 'projects_dir', None) or self.tg.path node_project = base.make_node('build_all_projects' + self.project_extension) # Node p_build = self.vsnode_build_all(self, node_project) p_build.collect_properties() self.all_projects.append(p_build) node_project = base.make_node('install_all_projects' + self.project_extension) # Node p_install = self.vsnode_install_all(self, node_project) p_install.collect_properties() self.all_projects.append(p_install) node_project = base.make_node('project_view' + self.project_extension) # Node p_view = self.vsnode_project_view(self, node_project) p_view.collect_source() p_view.collect_properties() self.all_projects.append(p_view) n = self.vsnode_vsdir(self, make_uuid(self.srcnode.abspath() + 'build_aliases'), "build_aliases") p_build.parent = p_install.parent = p_view.parent = n self.all_projects.append(n) def collect_dirs(self): """ Create the folder structure in the CodeLite project view """ seen = {} def make_parents(proj): # look at a project, try to make a parent if getattr(proj, 'parent', None): # aliases already have parents return x = proj.iter_path if x in seen: proj.parent = seen[x] return # There is not vsnode_vsdir for x. 
# So create a project representing the folder "x" n = proj.parent = seen[x] = self.vsnode_vsdir(self, make_uuid(x.abspath()), x.name) n.iter_path = x.parent self.all_projects.append(n) # recurse up to the project directory if x.height() > self.srcnode.height() + 1: make_parents(n) for p in self.all_projects[:]: # iterate over a copy of all projects if not getattr(p, 'tg', None): # but only projects that have a task generator continue # make a folder for each task generator p.iter_path = p.tg.path make_parents(p) tdb-1.4.2/third_party/waf/waflib/extras/color_gcc.py0000660000000000000000000000216613444661622022424 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Replaces the default formatter by one which understands GCC output and colorizes it. __author__ = __maintainer__ = "Jérôme Carretero " __copyright__ = "Jérôme Carretero, 2012" import sys from waflib import Logs class ColorGCCFormatter(Logs.formatter): def __init__(self, colors): self.colors = colors Logs.formatter.__init__(self) def format(self, rec): frame = sys._getframe() while frame: func = frame.f_code.co_name if func == 'exec_command': cmd = frame.f_locals.get('cmd') if isinstance(cmd, list) and ('gcc' in cmd[0] or 'g++' in cmd[0]): lines = [] for line in rec.msg.splitlines(): if 'warning: ' in line: lines.append(self.colors.YELLOW + line) elif 'error: ' in line: lines.append(self.colors.RED + line) elif 'note: ' in line: lines.append(self.colors.CYAN + line) else: lines.append(line) rec.msg = "\n".join(lines) frame = frame.f_back return Logs.formatter.format(self, rec) def options(opt): Logs.log.handlers[0].setFormatter(ColorGCCFormatter(Logs.colors)) tdb-1.4.2/third_party/waf/waflib/extras/color_msvc.py0000660000000000000000000000343513527011455022633 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Replaces the default formatter by one which understands MSVC output and colorizes it. 
# Modified from color_gcc.py __author__ = __maintainer__ = "Alibek Omarov " __copyright__ = "Alibek Omarov, 2019" import sys from waflib import Logs class ColorMSVCFormatter(Logs.formatter): def __init__(self, colors): self.colors = colors Logs.formatter.__init__(self) def parseMessage(self, line, color): # Split messaage from 'disk:filepath: type: message' arr = line.split(':', 3) if len(arr) < 4: return line colored = self.colors.BOLD + arr[0] + ':' + arr[1] + ':' + self.colors.NORMAL colored += color + arr[2] + ':' + self.colors.NORMAL colored += arr[3] return colored def format(self, rec): frame = sys._getframe() while frame: func = frame.f_code.co_name if func == 'exec_command': cmd = frame.f_locals.get('cmd') if isinstance(cmd, list): # Fix file case, it may be CL.EXE or cl.exe argv0 = cmd[0].lower() if 'cl.exe' in argv0: lines = [] # This will not work with "localized" versions # of MSVC for line in rec.msg.splitlines(): if ': warning ' in line: lines.append(self.parseMessage(line, self.colors.YELLOW)) elif ': error ' in line: lines.append(self.parseMessage(line, self.colors.RED)) elif ': fatal error ' in line: lines.append(self.parseMessage(line, self.colors.RED + self.colors.BOLD)) elif ': note: ' in line: lines.append(self.parseMessage(line, self.colors.CYAN)) else: lines.append(line) rec.msg = "\n".join(lines) frame = frame.f_back return Logs.formatter.format(self, rec) def options(opt): Logs.log.handlers[0].setFormatter(ColorMSVCFormatter(Logs.colors)) tdb-1.4.2/third_party/waf/waflib/extras/color_rvct.py0000660000000000000000000000244513444661622022646 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Replaces the default formatter by one which understands RVCT output and colorizes it. __author__ = __maintainer__ = "Jérôme Carretero " __copyright__ = "Jérôme Carretero, 2012" import sys import atexit from waflib import Logs errors = [] def show_errors(): for i, e in enumerate(errors): if i > 5: break print("Error: %s" % e) atexit.register(show_errors) class RcvtFormatter(Logs.formatter): def __init__(self, colors): Logs.formatter.__init__(self) self.colors = colors def format(self, rec): frame = sys._getframe() while frame: func = frame.f_code.co_name if func == 'exec_command': cmd = frame.f_locals['cmd'] if isinstance(cmd, list) and ('armcc' in cmd[0] or 'armld' in cmd[0]): lines = [] for line in rec.msg.splitlines(): if 'Warning: ' in line: lines.append(self.colors.YELLOW + line) elif 'Error: ' in line: lines.append(self.colors.RED + line) errors.append(line) elif 'note: ' in line: lines.append(self.colors.CYAN + line) else: lines.append(line) rec.msg = "\n".join(lines) frame = frame.f_back return Logs.formatter.format(self, rec) def options(opt): Logs.log.handlers[0].setFormatter(RcvtFormatter(Logs.colors)) tdb-1.4.2/third_party/waf/waflib/extras/compat15.py0000660000000000000000000002704513444661622022126 0ustar rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2010 (ita) """ This file is provided to enable compatibility with waf 1.5 It was enabled by default in waf 1.6, but it is not used in waf 1.7 """ import sys from waflib import ConfigSet, Logs, Options, Scripting, Task, Build, Configure, Node, Runner, TaskGen, Utils, Errors, Context # the following is to bring some compatibility with waf 1.5 "import waflib.Configure → import Configure" sys.modules['Environment'] = ConfigSet ConfigSet.Environment = ConfigSet.ConfigSet sys.modules['Logs'] = Logs sys.modules['Options'] = Options sys.modules['Scripting'] = Scripting sys.modules['Task'] = Task sys.modules['Build'] = Build sys.modules['Configure'] = Configure sys.modules['Node'] = Node sys.modules['Runner'] = Runner sys.modules['TaskGen'] = TaskGen sys.modules['Utils'] = Utils sys.modules['Constants'] = Context Context.SRCDIR = '' Context.BLDDIR = '' from waflib.Tools import c_preproc sys.modules['preproc'] = c_preproc from waflib.Tools import c_config sys.modules['config_c'] = c_config ConfigSet.ConfigSet.copy = ConfigSet.ConfigSet.derive ConfigSet.ConfigSet.set_variant = Utils.nada Utils.pproc = Utils.subprocess Build.BuildContext.add_subdirs = Build.BuildContext.recurse Build.BuildContext.new_task_gen = Build.BuildContext.__call__ Build.BuildContext.is_install = 0 Node.Node.relpath_gen = Node.Node.path_from Utils.pproc = Utils.subprocess Utils.get_term_cols = Logs.get_term_cols def cmd_output(cmd, **kw): silent = False if 'silent' in kw: silent = kw['silent'] del(kw['silent']) if 'e' in kw: tmp = kw['e'] del(kw['e']) kw['env'] = tmp kw['shell'] = isinstance(cmd, str) kw['stdout'] = Utils.subprocess.PIPE if silent: kw['stderr'] = Utils.subprocess.PIPE try: p = Utils.subprocess.Popen(cmd, **kw) output = p.communicate()[0] except OSError as e: raise ValueError(str(e)) if p.returncode: if not silent: msg = "command execution failed: %s -> %r" % (cmd, str(output)) raise ValueError(msg) output = '' return output Utils.cmd_output = cmd_output def name_to_obj(self, s, env=None): if Logs.verbose: Logs.warn('compat: change "name_to_obj(name, env)" by "get_tgen_by_name(name)"') return self.get_tgen_by_name(s) Build.BuildContext.name_to_obj = name_to_obj def env_of_name(self, name): try: return self.all_envs[name] except KeyError: Logs.error('no such environment: '+name) return None Build.BuildContext.env_of_name = env_of_name def set_env_name(self, name, env): self.all_envs[name] = env return env Configure.ConfigurationContext.set_env_name = set_env_name def retrieve(self, name, fromenv=None): try: env = self.all_envs[name] except KeyError: env = ConfigSet.ConfigSet() self.prepare_env(env) self.all_envs[name] = env else: if fromenv: Logs.warn('The environment %s may have been configured already', name) return env Configure.ConfigurationContext.retrieve = retrieve Configure.ConfigurationContext.sub_config = Configure.ConfigurationContext.recurse Configure.ConfigurationContext.check_tool = Configure.ConfigurationContext.load Configure.conftest = Configure.conf Configure.ConfigurationError = Errors.ConfigurationError Utils.WafError = Errors.WafError Options.OptionsContext.sub_options = Options.OptionsContext.recurse Options.OptionsContext.tool_options = Context.Context.load Options.Handler = Options.OptionsContext Task.simple_task_type = Task.task_type_from_func = Task.task_factory Task.Task.classes = Task.classes def setitem(self, key, value): if key.startswith('CCFLAGS'): key = key[1:] self.table[key] = value ConfigSet.ConfigSet.__setitem__ = setitem 
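
# Illustrative sketch (not part of the original module): the __setitem__ shim
# above silently maps legacy 'CCFLAGS*' keys from waf 1.5 scripts onto their
# 'CFLAGS*' replacements, e.g.:
#
#   env = ConfigSet.ConfigSet()
#   env['CCFLAGS'] = ['-O2']         # key is stored as 'CFLAGS'
#   assert env['CFLAGS'] == ['-O2']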
@TaskGen.feature('d') @TaskGen.before('apply_incpaths') def old_importpaths(self): if getattr(self, 'importpaths', []): self.includes = self.importpaths from waflib import Context eld = Context.load_tool def load_tool(*k, **kw): ret = eld(*k, **kw) if 'set_options' in ret.__dict__: if Logs.verbose: Logs.warn('compat: rename "set_options" to options') ret.options = ret.set_options if 'detect' in ret.__dict__: if Logs.verbose: Logs.warn('compat: rename "detect" to "configure"') ret.configure = ret.detect return ret Context.load_tool = load_tool def get_curdir(self): return self.path.abspath() Context.Context.curdir = property(get_curdir, Utils.nada) def get_srcdir(self): return self.srcnode.abspath() Configure.ConfigurationContext.srcdir = property(get_srcdir, Utils.nada) def get_blddir(self): return self.bldnode.abspath() Configure.ConfigurationContext.blddir = property(get_blddir, Utils.nada) Configure.ConfigurationContext.check_message_1 = Configure.ConfigurationContext.start_msg Configure.ConfigurationContext.check_message_2 = Configure.ConfigurationContext.end_msg rev = Context.load_module def load_module(path, encoding=None): ret = rev(path, encoding) if 'set_options' in ret.__dict__: if Logs.verbose: Logs.warn('compat: rename "set_options" to "options" (%r)', path) ret.options = ret.set_options if 'srcdir' in ret.__dict__: if Logs.verbose: Logs.warn('compat: rename "srcdir" to "top" (%r)', path) ret.top = ret.srcdir if 'blddir' in ret.__dict__: if Logs.verbose: Logs.warn('compat: rename "blddir" to "out" (%r)', path) ret.out = ret.blddir Utils.g_module = Context.g_module Options.launch_dir = Context.launch_dir return ret Context.load_module = load_module old_post = TaskGen.task_gen.post def post(self): self.features = self.to_list(self.features) if 'cc' in self.features: if Logs.verbose: Logs.warn('compat: the feature cc does not exist anymore (use "c")') self.features.remove('cc') self.features.append('c') if 'cstaticlib' in self.features: if Logs.verbose: Logs.warn('compat: the feature cstaticlib does not exist anymore (use "cstlib" or "cxxstlib")') self.features.remove('cstaticlib') self.features.append(('cxx' in self.features) and 'cxxstlib' or 'cstlib') if getattr(self, 'ccflags', None): if Logs.verbose: Logs.warn('compat: "ccflags" was renamed to "cflags"') self.cflags = self.ccflags return old_post(self) TaskGen.task_gen.post = post def waf_version(*k, **kw): Logs.warn('wrong version (waf_version was removed in waf 1.6)') Utils.waf_version = waf_version import os @TaskGen.feature('c', 'cxx', 'd') @TaskGen.before('apply_incpaths', 'propagate_uselib_vars') @TaskGen.after('apply_link', 'process_source') def apply_uselib_local(self): """ process the uselib_local attribute execute after apply_link because of the execution order set on 'link_task' """ env = self.env from waflib.Tools.ccroot import stlink_task # 1. 
the case of the libs defined in the project (visit ancestors first) # the ancestors external libraries (uselib) will be prepended self.uselib = self.to_list(getattr(self, 'uselib', [])) self.includes = self.to_list(getattr(self, 'includes', [])) names = self.to_list(getattr(self, 'uselib_local', [])) get = self.bld.get_tgen_by_name seen = set() seen_uselib = set() tmp = Utils.deque(names) # consume a copy of the list of names if tmp: if Logs.verbose: Logs.warn('compat: "uselib_local" is deprecated, replace by "use"') while tmp: lib_name = tmp.popleft() # visit dependencies only once if lib_name in seen: continue y = get(lib_name) y.post() seen.add(lib_name) # object has ancestors to process (shared libraries): add them to the end of the list if getattr(y, 'uselib_local', None): for x in self.to_list(getattr(y, 'uselib_local', [])): obj = get(x) obj.post() if getattr(obj, 'link_task', None): if not isinstance(obj.link_task, stlink_task): tmp.append(x) # link task and flags if getattr(y, 'link_task', None): link_name = y.target[y.target.rfind(os.sep) + 1:] if isinstance(y.link_task, stlink_task): env.append_value('STLIB', [link_name]) else: # some linkers can link against programs env.append_value('LIB', [link_name]) # the order self.link_task.set_run_after(y.link_task) # for the recompilation self.link_task.dep_nodes += y.link_task.outputs # add the link path too tmp_path = y.link_task.outputs[0].parent.bldpath() if not tmp_path in env['LIBPATH']: env.prepend_value('LIBPATH', [tmp_path]) # add ancestors uselib too - but only propagate those that have no staticlib defined for v in self.to_list(getattr(y, 'uselib', [])): if v not in seen_uselib: seen_uselib.add(v) if not env['STLIB_' + v]: if not v in self.uselib: self.uselib.insert(0, v) # if the library task generator provides 'export_includes', add to the include path # the export_includes must be a list of paths relative to the other library if getattr(y, 'export_includes', None): self.includes.extend(y.to_incnodes(y.export_includes)) @TaskGen.feature('cprogram', 'cxxprogram', 'cstlib', 'cxxstlib', 'cshlib', 'cxxshlib', 'dprogram', 'dstlib', 'dshlib') @TaskGen.after('apply_link') def apply_objdeps(self): "add the .o files produced by some other object files in the same manner as uselib_local" names = getattr(self, 'add_objects', []) if not names: return names = self.to_list(names) get = self.bld.get_tgen_by_name seen = [] while names: x = names[0] # visit dependencies only once if x in seen: names = names[1:] continue # object does not exist ? y = get(x) # object has ancestors to process first ? 
update the list of names if getattr(y, 'add_objects', None): added = 0 lst = y.to_list(y.add_objects) lst.reverse() for u in lst: if u in seen: continue added = 1 names = [u]+names if added: continue # list of names modified, loop # safe to process the current object y.post() seen.append(x) for t in getattr(y, 'compiled_tasks', []): self.link_task.inputs.extend(t.outputs) @TaskGen.after('apply_link') def process_obj_files(self): if not hasattr(self, 'obj_files'): return for x in self.obj_files: node = self.path.find_resource(x) self.link_task.inputs.append(node) @TaskGen.taskgen_method def add_obj_file(self, file): """Small example on how to link object files as if they were source obj = bld.create_obj('cc') obj.add_obj_file('foo.o')""" if not hasattr(self, 'obj_files'): self.obj_files = [] if not 'process_obj_files' in self.meths: self.meths.append('process_obj_files') self.obj_files.append(file) old_define = Configure.ConfigurationContext.__dict__['define'] @Configure.conf def define(self, key, val, quote=True, comment=''): old_define(self, key, val, quote, comment) if key.startswith('HAVE_'): self.env[key] = 1 old_undefine = Configure.ConfigurationContext.__dict__['undefine'] @Configure.conf def undefine(self, key, comment=''): old_undefine(self, key, comment) if key.startswith('HAVE_'): self.env[key] = 0 # some people might want to use export_incdirs, but it was renamed def set_incdirs(self, val): Logs.warn('compat: change "export_incdirs" by "export_includes"') self.export_includes = val TaskGen.task_gen.export_incdirs = property(None, set_incdirs) def install_dir(self, path): if not path: return [] destpath = Utils.subst_vars(path, self.env) if self.is_install > 0: Logs.info('* creating %s', destpath) Utils.check_dir(destpath) elif self.is_install < 0: Logs.info('* removing %s', destpath) try: os.remove(destpath) except OSError: pass Build.BuildContext.install_dir = install_dir # before/after names repl = {'apply_core': 'process_source', 'apply_lib_vars': 'process_source', 'apply_obj_vars': 'propagate_uselib_vars', 'exec_rule': 'process_rule' } def after(*k): k = [repl.get(key, key) for key in k] return TaskGen.after_method(*k) def before(*k): k = [repl.get(key, key) for key in k] return TaskGen.before_method(*k) TaskGen.before = before tdb-1.4.2/third_party/waf/waflib/extras/cppcheck.py0000660000000000000000000004320413527011455022243 0ustar rootroot00000000000000#! /usr/bin/env python # -*- encoding: utf-8 -*- # Michel Mooij, michel.mooij7@gmail.com """ Tool Description ================ This module provides a waf wrapper (i.e. waftool) around the C/C++ source code checking tool 'cppcheck'. See http://cppcheck.sourceforge.net/ for more information on the cppcheck tool itself. Note that many linux distributions already provide a ready to install version of cppcheck. On fedora, for instance, it can be installed using yum: 'sudo yum install cppcheck' Usage ===== In order to use this waftool simply add it to the 'options' and 'configure' functions of your main waf script as shown in the example below: def options(opt): opt.load('cppcheck', tooldir='./waftools') def configure(conf): conf.load('cppcheck') Note that example shown above assumes that the cppcheck waftool is located in the sub directory named 'waftools'. When configured as shown in the example above, cppcheck will automatically perform a source code analysis on all C/C++ build tasks that have been defined in your waf build system. 
The example shown below for a C program will be used as input for cppcheck when building the task. def build(bld): bld.program(name='foo', src='foobar.c') The result of the source code analysis will be stored both as xml and html files in the build location for the task. Should any error be detected by cppcheck the build will be aborted and a link to the html report will be shown. By default, one index.html file is created for each task generator. A global index.html file can be obtained by setting the following variable in the configuration section: conf.env.CPPCHECK_SINGLE_HTML = False When needed source code checking by cppcheck can be disabled per task, per detected error or warning for a particular task. It can be also be disabled for all tasks. In order to exclude a task from source code checking add the skip option to the task as shown below: def build(bld): bld.program( name='foo', src='foobar.c' cppcheck_skip=True ) When needed problems detected by cppcheck may be suppressed using a file containing a list of suppression rules. The relative or absolute path to this file can be added to the build task as shown in the example below: bld.program( name='bar', src='foobar.c', cppcheck_suppress='bar.suppress' ) A cppcheck suppress file should contain one suppress rule per line. Each of these rules will be passed as an '--suppress=' argument to cppcheck. Dependencies ================ This waftool depends on the python pygments module, it is used for source code syntax highlighting when creating the html reports. see http://pygments.org/ for more information on this package. Remarks ================ The generation of the html report is originally based on the cppcheck-htmlreport.py script that comes shipped with the cppcheck tool. """ import sys import xml.etree.ElementTree as ElementTree from waflib import Task, TaskGen, Logs, Context, Options PYGMENTS_EXC_MSG= ''' The required module 'pygments' could not be found. Please install it using your platform package manager (e.g. apt-get or yum), using 'pip' or 'easy_install', see 'http://pygments.org/download/' for installation instructions. ''' try: import pygments from pygments import formatters, lexers except ImportError as e: Logs.warn(PYGMENTS_EXC_MSG) raise e def options(opt): opt.add_option('--cppcheck-skip', dest='cppcheck_skip', default=False, action='store_true', help='do not check C/C++ sources (default=False)') opt.add_option('--cppcheck-err-resume', dest='cppcheck_err_resume', default=False, action='store_true', help='continue in case of errors (default=False)') opt.add_option('--cppcheck-bin-enable', dest='cppcheck_bin_enable', default='warning,performance,portability,style,unusedFunction', action='store', help="cppcheck option '--enable=' for binaries (default=warning,performance,portability,style,unusedFunction)") opt.add_option('--cppcheck-lib-enable', dest='cppcheck_lib_enable', default='warning,performance,portability,style', action='store', help="cppcheck option '--enable=' for libraries (default=warning,performance,portability,style)") opt.add_option('--cppcheck-std-c', dest='cppcheck_std_c', default='c99', action='store', help='cppcheck standard to use when checking C (default=c99)') opt.add_option('--cppcheck-std-cxx', dest='cppcheck_std_cxx', default='c++03', action='store', help='cppcheck standard to use when checking C++ (default=c++03)') opt.add_option('--cppcheck-check-config', dest='cppcheck_check_config', default=False, action='store_true', help='forced check for missing buildin include files, e.g. 
stdio.h (default=False)') opt.add_option('--cppcheck-max-configs', dest='cppcheck_max_configs', default='20', action='store', help='maximum preprocessor (--max-configs) define iterations (default=20)') opt.add_option('--cppcheck-jobs', dest='cppcheck_jobs', default='1', action='store', help='number of jobs (-j) to do the checking work (default=1)') def configure(conf): if conf.options.cppcheck_skip: conf.env.CPPCHECK_SKIP = [True] conf.env.CPPCHECK_STD_C = conf.options.cppcheck_std_c conf.env.CPPCHECK_STD_CXX = conf.options.cppcheck_std_cxx conf.env.CPPCHECK_MAX_CONFIGS = conf.options.cppcheck_max_configs conf.env.CPPCHECK_BIN_ENABLE = conf.options.cppcheck_bin_enable conf.env.CPPCHECK_LIB_ENABLE = conf.options.cppcheck_lib_enable conf.env.CPPCHECK_JOBS = conf.options.cppcheck_jobs if conf.options.cppcheck_jobs != '1' and ('unusedFunction' in conf.options.cppcheck_bin_enable or 'unusedFunction' in conf.options.cppcheck_lib_enable or 'all' in conf.options.cppcheck_bin_enable or 'all' in conf.options.cppcheck_lib_enable): Logs.warn('cppcheck: unusedFunction cannot be used with multiple threads, cppcheck will disable it automatically') conf.find_program('cppcheck', var='CPPCHECK') # set to True to get a single index.html file conf.env.CPPCHECK_SINGLE_HTML = False @TaskGen.feature('c') @TaskGen.feature('cxx') def cppcheck_execute(self): if hasattr(self.bld, 'conf'): return if len(self.env.CPPCHECK_SKIP) or Options.options.cppcheck_skip: return if getattr(self, 'cppcheck_skip', False): return task = self.create_task('cppcheck') task.cmd = _tgen_create_cmd(self) task.fatal = [] if not Options.options.cppcheck_err_resume: task.fatal.append('error') def _tgen_create_cmd(self): features = getattr(self, 'features', []) std_c = self.env.CPPCHECK_STD_C std_cxx = self.env.CPPCHECK_STD_CXX max_configs = self.env.CPPCHECK_MAX_CONFIGS bin_enable = self.env.CPPCHECK_BIN_ENABLE lib_enable = self.env.CPPCHECK_LIB_ENABLE jobs = self.env.CPPCHECK_JOBS cmd = self.env.CPPCHECK args = ['--inconclusive','--report-progress','--verbose','--xml','--xml-version=2'] args.append('--max-configs=%s' % max_configs) args.append('-j %s' % jobs) if 'cxx' in features: args.append('--language=c++') args.append('--std=%s' % std_cxx) else: args.append('--language=c') args.append('--std=%s' % std_c) if Options.options.cppcheck_check_config: args.append('--check-config') if set(['cprogram','cxxprogram']) & set(features): args.append('--enable=%s' % bin_enable) else: args.append('--enable=%s' % lib_enable) for src in self.to_list(getattr(self, 'source', [])): if not isinstance(src, str): src = repr(src) args.append(src) for inc in self.to_incnodes(self.to_list(getattr(self, 'includes', []))): if not isinstance(inc, str): inc = repr(inc) args.append('-I%s' % inc) for inc in self.to_incnodes(self.to_list(self.env.INCLUDES)): if not isinstance(inc, str): inc = repr(inc) args.append('-I%s' % inc) return cmd + args class cppcheck(Task.Task): quiet = True def run(self): stderr = self.generator.bld.cmd_and_log(self.cmd, quiet=Context.STDERR, output=Context.STDERR) self._save_xml_report(stderr) defects = self._get_defects(stderr) index = self._create_html_report(defects) self._errors_evaluate(defects, index) return 0 def _save_xml_report(self, s): '''use cppcheck xml result string, add the command string used to invoke cppcheck and save as xml file. 
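
    The resulting document roughly has this shape (sketched from the parsing
    done in _get_defects below; the element values here are hypothetical):

        <results version="2">
            <cppcheck version="..."><cmd>...</cmd></cppcheck>
            <errors>
                <error id="nullPointer" severity="error" msg="..." verbose="...">
                    <location file="foo.c" line="7"/>
                </error>
            </errors>
        </results>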
''' header = '%s\n' % s.splitlines()[0] root = ElementTree.fromstring(s) cmd = ElementTree.SubElement(root.find('cppcheck'), 'cmd') cmd.text = str(self.cmd) body = ElementTree.tostring(root).decode('us-ascii') body_html_name = 'cppcheck-%s.xml' % self.generator.get_name() if self.env.CPPCHECK_SINGLE_HTML: body_html_name = 'cppcheck.xml' node = self.generator.path.get_bld().find_or_declare(body_html_name) node.write(header + body) def _get_defects(self, xml_string): '''evaluate the xml string returned by cppcheck (on sdterr) and use it to create a list of defects. ''' defects = [] for error in ElementTree.fromstring(xml_string).iter('error'): defect = {} defect['id'] = error.get('id') defect['severity'] = error.get('severity') defect['msg'] = str(error.get('msg')).replace('<','<') defect['verbose'] = error.get('verbose') for location in error.findall('location'): defect['file'] = location.get('file') defect['line'] = str(int(location.get('line')) - 1) defects.append(defect) return defects def _create_html_report(self, defects): files, css_style_defs = self._create_html_files(defects) index = self._create_html_index(files) self._create_css_file(css_style_defs) return index def _create_html_files(self, defects): sources = {} defects = [defect for defect in defects if 'file' in defect] for defect in defects: name = defect['file'] if not name in sources: sources[name] = [defect] else: sources[name].append(defect) files = {} css_style_defs = None bpath = self.generator.path.get_bld().abspath() names = list(sources.keys()) for i in range(0,len(names)): name = names[i] if self.env.CPPCHECK_SINGLE_HTML: htmlfile = 'cppcheck/%i.html' % (i) else: htmlfile = 'cppcheck/%s%i.html' % (self.generator.get_name(),i) errors = sources[name] files[name] = { 'htmlfile': '%s/%s' % (bpath, htmlfile), 'errors': errors } css_style_defs = self._create_html_file(name, htmlfile, errors) return files, css_style_defs def _create_html_file(self, sourcefile, htmlfile, errors): name = self.generator.get_name() root = ElementTree.fromstring(CPPCHECK_HTML_FILE) title = root.find('head/title') title.text = 'cppcheck - report - %s' % name body = root.find('body') for div in body.findall('div'): if div.get('id') == 'page': page = div break for div in page.findall('div'): if div.get('id') == 'header': h1 = div.find('h1') h1.text = 'cppcheck report - %s' % name if div.get('id') == 'menu': indexlink = div.find('a') if self.env.CPPCHECK_SINGLE_HTML: indexlink.attrib['href'] = 'index.html' else: indexlink.attrib['href'] = 'index-%s.html' % name if div.get('id') == 'content': content = div srcnode = self.generator.bld.root.find_node(sourcefile) hl_lines = [e['line'] for e in errors if 'line' in e] formatter = CppcheckHtmlFormatter(linenos=True, style='colorful', hl_lines=hl_lines, lineanchors='line') formatter.errors = [e for e in errors if 'line' in e] css_style_defs = formatter.get_style_defs('.highlight') lexer = pygments.lexers.guess_lexer_for_filename(sourcefile, "") s = pygments.highlight(srcnode.read(), lexer, formatter) table = ElementTree.fromstring(s) content.append(table) s = ElementTree.tostring(root, method='html').decode('us-ascii') s = CCPCHECK_HTML_TYPE + s node = self.generator.path.get_bld().find_or_declare(htmlfile) node.write(s) return css_style_defs def _create_html_index(self, files): name = self.generator.get_name() root = ElementTree.fromstring(CPPCHECK_HTML_FILE) title = root.find('head/title') title.text = 'cppcheck - report - %s' % name body = root.find('body') for div in body.findall('div'): if 
div.get('id') == 'page': page = div break for div in page.findall('div'): if div.get('id') == 'header': h1 = div.find('h1') h1.text = 'cppcheck report - %s' % name if div.get('id') == 'content': content = div self._create_html_table(content, files) if div.get('id') == 'menu': indexlink = div.find('a') if self.env.CPPCHECK_SINGLE_HTML: indexlink.attrib['href'] = 'index.html' else: indexlink.attrib['href'] = 'index-%s.html' % name s = ElementTree.tostring(root, method='html').decode('us-ascii') s = CCPCHECK_HTML_TYPE + s index_html_name = 'cppcheck/index-%s.html' % name if self.env.CPPCHECK_SINGLE_HTML: index_html_name = 'cppcheck/index.html' node = self.generator.path.get_bld().find_or_declare(index_html_name) node.write(s) return node def _create_html_table(self, content, files): table = ElementTree.fromstring(CPPCHECK_HTML_TABLE) for name, val in files.items(): f = val['htmlfile'] s = '%s\n' % (f,name) row = ElementTree.fromstring(s) table.append(row) errors = sorted(val['errors'], key=lambda e: int(e['line']) if 'line' in e else sys.maxint) for e in errors: if not 'line' in e: s = '%s%s%s\n' % (e['id'], e['severity'], e['msg']) else: attr = '' if e['severity'] == 'error': attr = 'class="error"' s = '%s' % (f, e['line'], e['line']) s+= '%s%s%s\n' % (e['id'], e['severity'], attr, e['msg']) row = ElementTree.fromstring(s) table.append(row) content.append(table) def _create_css_file(self, css_style_defs): css = str(CPPCHECK_CSS_FILE) if css_style_defs: css = "%s\n%s\n" % (css, css_style_defs) node = self.generator.path.get_bld().find_or_declare('cppcheck/style.css') node.write(css) def _errors_evaluate(self, errors, http_index): name = self.generator.get_name() fatal = self.fatal severity = [err['severity'] for err in errors] problems = [err for err in errors if err['severity'] != 'information'] if set(fatal) & set(severity): exc = "\n" exc += "\nccpcheck detected fatal error(s) in task '%s', see report for details:" % name exc += "\n file://%r" % (http_index) exc += "\n" self.generator.bld.fatal(exc) elif len(problems): msg = "\nccpcheck detected (possible) problem(s) in task '%s', see report for details:" % name msg += "\n file://%r" % http_index msg += "\n" Logs.error(msg) class CppcheckHtmlFormatter(pygments.formatters.HtmlFormatter): errors = [] def wrap(self, source, outfile): line_no = 1 for i, t in super(CppcheckHtmlFormatter, self).wrap(source, outfile): # If this is a source code line we want to add a span tag at the end. if i == 1: for error in self.errors: if int(error['line']) == line_no: t = t.replace('\n', CPPCHECK_HTML_ERROR % error['msg']) line_no += 1 yield i, t CCPCHECK_HTML_TYPE = \ '\n' CPPCHECK_HTML_FILE = """ ]> cppcheck - report - XXX
<html>
	<head>
		<title>cppcheck - report - XXX</title>
		<link href="style.css" rel="stylesheet" type="text/css" />
	</head>
	<body class="body">
		<div id="page-header">&nbsp;</div>
		<div id="page">
			<div id="header">
				<h1>cppcheck report - XXX</h1>
			</div>
			<div id="menu">
				<a href="index.html">Defect list</a>
			</div>
			<div id="content">
			</div>
			<div id="footer">
				<div>cppcheck - a tool for static C/C++ code analysis</div>
				&nbsp;
			</div>
			<div id="page-footer">&nbsp;</div>
		</div>
	</body>
</html>
""" CPPCHECK_HTML_TABLE = """
Line Id Severity Message
""" CPPCHECK_HTML_ERROR = \ '<--- %s\n' CPPCHECK_CSS_FILE = """ body.body { font-family: Arial; font-size: 13px; background-color: black; padding: 0px; margin: 0px; } .error { font-family: Arial; font-size: 13px; background-color: #ffb7b7; padding: 0px; margin: 0px; } th, td { min-width: 100px; text-align: left; } #page-header { clear: both; width: 1200px; margin: 20px auto 0px auto; height: 10px; border-bottom-width: 2px; border-bottom-style: solid; border-bottom-color: #aaaaaa; } #page { width: 1160px; margin: auto; border-left-width: 2px; border-left-style: solid; border-left-color: #aaaaaa; border-right-width: 2px; border-right-style: solid; border-right-color: #aaaaaa; background-color: White; padding: 20px; } #page-footer { clear: both; width: 1200px; margin: auto; height: 10px; border-top-width: 2px; border-top-style: solid; border-top-color: #aaaaaa; } #header { width: 100%; height: 70px; background-image: url(logo.png); background-repeat: no-repeat; background-position: left top; border-bottom-style: solid; border-bottom-width: thin; border-bottom-color: #aaaaaa; } #menu { margin-top: 5px; text-align: left; float: left; width: 100px; height: 300px; } #menu > a { margin-left: 10px; display: block; } #content { float: left; width: 1020px; margin: 5px; padding: 0px 10px 10px 10px; border-left-style: solid; border-left-width: thin; border-left-color: #aaaaaa; } #footer { padding-bottom: 5px; padding-top: 5px; border-top-style: solid; border-top-width: thin; border-top-color: #aaaaaa; clear: both; font-size: 10px; } #footer > div { float: left; width: 33%; } """ tdb-1.4.2/third_party/waf/waflib/extras/cpplint.py0000660000000000000000000001654213527011455022141 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # # written by Sylvain Rouquette, 2014 ''' This is an extra tool, not bundled with the default waf binary. To add the cpplint tool to the waf file: $ ./waf-light --tools=compat15,cpplint this tool also requires cpplint for python. 
If you have PIP, you can install it like this: pip install cpplint When using this tool, the wscript will look like: def options(opt): opt.load('compiler_cxx cpplint') def configure(conf): conf.load('compiler_cxx cpplint') # optional, you can also specify them on the command line conf.env.CPPLINT_FILTERS = ','.join(( '-whitespace/newline', # c++11 lambda '-readability/braces', # c++11 constructor '-whitespace/braces', # c++11 constructor '-build/storage_class', # c++11 for-range '-whitespace/blank_line', # user pref '-whitespace/labels' # user pref )) def build(bld): bld(features='cpplint', source='main.cpp', target='app') # add include files, because they aren't usually built bld(features='cpplint', source=bld.path.ant_glob('**/*.hpp')) ''' from __future__ import absolute_import import sys, re import logging from waflib import Errors, Task, TaskGen, Logs, Options, Node, Utils critical_errors = 0 CPPLINT_FORMAT = '[CPPLINT] %(filename)s:\nline %(linenum)s, severity %(confidence)s, category: %(category)s\n%(message)s\n' RE_EMACS = re.compile(r'(?P.*):(?P\d+): (?P.*) \[(?P.*)\] \[(?P\d+)\]') CPPLINT_RE = { 'waf': RE_EMACS, 'emacs': RE_EMACS, 'vs7': re.compile(r'(?P.*)\((?P\d+)\): (?P.*) \[(?P.*)\] \[(?P\d+)\]'), 'eclipse': re.compile(r'(?P.*):(?P\d+): warning: (?P.*) \[(?P.*)\] \[(?P\d+)\]'), } CPPLINT_STR = ('${CPPLINT} ' '--verbose=${CPPLINT_LEVEL} ' '--output=${CPPLINT_OUTPUT} ' '--filter=${CPPLINT_FILTERS} ' '--root=${CPPLINT_ROOT} ' '--linelength=${CPPLINT_LINE_LENGTH} ') def options(opt): opt.add_option('--cpplint-filters', type='string', default='', dest='CPPLINT_FILTERS', help='add filters to cpplint') opt.add_option('--cpplint-length', type='int', default=80, dest='CPPLINT_LINE_LENGTH', help='specify the line length (default: 80)') opt.add_option('--cpplint-level', default=1, type='int', dest='CPPLINT_LEVEL', help='specify the log level (default: 1)') opt.add_option('--cpplint-break', default=5, type='int', dest='CPPLINT_BREAK', help='break the build if error >= level (default: 5)') opt.add_option('--cpplint-root', type='string', default='', dest='CPPLINT_ROOT', help='root directory used to derive header guard') opt.add_option('--cpplint-skip', action='store_true', default=False, dest='CPPLINT_SKIP', help='skip cpplint during build') opt.add_option('--cpplint-output', type='string', default='waf', dest='CPPLINT_OUTPUT', help='select output format (waf, emacs, vs7, eclipse)') def configure(conf): try: conf.find_program('cpplint', var='CPPLINT') except Errors.ConfigurationError: conf.env.CPPLINT_SKIP = True class cpplint_formatter(Logs.formatter, object): def __init__(self, fmt): logging.Formatter.__init__(self, CPPLINT_FORMAT) self.fmt = fmt def format(self, rec): if self.fmt == 'waf': result = CPPLINT_RE[self.fmt].match(rec.msg).groupdict() rec.msg = CPPLINT_FORMAT % result if rec.levelno <= logging.INFO: rec.c1 = Logs.colors.CYAN return super(cpplint_formatter, self).format(rec) class cpplint_handler(Logs.log_handler, object): def __init__(self, stream=sys.stderr, **kw): super(cpplint_handler, self).__init__(stream, **kw) self.stream = stream def emit(self, rec): rec.stream = self.stream self.emit_override(rec) self.flush() class cpplint_wrapper(object): def __init__(self, logger, threshold, fmt): self.logger = logger self.threshold = threshold self.fmt = fmt def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): if isinstance(exc_value, Utils.subprocess.CalledProcessError): messages = [m for m in exc_value.output.splitlines() if 'Done processing' not 
in m and 'Total errors found' not in m] for message in messages: self.write(message) return True def write(self, message): global critical_errors result = CPPLINT_RE[self.fmt].match(message) if not result: return level = int(result.groupdict()['confidence']) if level >= self.threshold: critical_errors += 1 if level <= 2: self.logger.info(message) elif level <= 4: self.logger.warning(message) else: self.logger.error(message) cpplint_logger = None def get_cpplint_logger(fmt): global cpplint_logger if cpplint_logger: return cpplint_logger cpplint_logger = logging.getLogger('cpplint') hdlr = cpplint_handler() hdlr.setFormatter(cpplint_formatter(fmt)) cpplint_logger.addHandler(hdlr) cpplint_logger.setLevel(logging.DEBUG) return cpplint_logger class cpplint(Task.Task): color = 'PINK' def __init__(self, *k, **kw): super(cpplint, self).__init__(*k, **kw) def run(self): global critical_errors with cpplint_wrapper(get_cpplint_logger(self.env.CPPLINT_OUTPUT), self.env.CPPLINT_BREAK, self.env.CPPLINT_OUTPUT): params = {key: str(self.env[key]) for key in self.env if 'CPPLINT_' in key} if params['CPPLINT_OUTPUT'] is 'waf': params['CPPLINT_OUTPUT'] = 'emacs' params['CPPLINT'] = self.env.get_flat('CPPLINT') cmd = Utils.subst_vars(CPPLINT_STR, params) env = self.env.env or None Utils.subprocess.check_output(cmd + self.inputs[0].abspath(), stderr=Utils.subprocess.STDOUT, env=env, shell=True) return critical_errors @TaskGen.extension('.h', '.hh', '.hpp', '.hxx') def cpplint_includes(self, node): pass @TaskGen.feature('cpplint') @TaskGen.before_method('process_source') def post_cpplint(self): if not self.env.CPPLINT_INITIALIZED: for key, value in Options.options.__dict__.items(): if not key.startswith('CPPLINT_') or self.env[key]: continue self.env[key] = value self.env.CPPLINT_INITIALIZED = True if self.env.CPPLINT_SKIP: return if not self.env.CPPLINT_OUTPUT in CPPLINT_RE: return for src in self.to_list(getattr(self, 'source', [])): if isinstance(src, Node.Node): node = src else: node = self.path.find_or_declare(src) if not node: self.bld.fatal('Could not find %r' % src) self.create_task('cpplint', node) tdb-1.4.2/third_party/waf/waflib/extras/cross_gnu.py0000660000000000000000000001373513444661622022500 0ustar rootroot00000000000000#!/usr/bin/python # -*- coding: utf-8 vi:ts=4:noexpandtab # Tool to provide dedicated variables for cross-compilation __author__ = __maintainer__ = "Jérôme Carretero " __copyright__ = "Jérôme Carretero, 2014" """ This tool allows to use environment variables to define cross-compilation variables intended for build variants. The variables are obtained from the environment in 3 ways: 1. By defining CHOST, they can be derived as ${CHOST}-${TOOL} 2. By defining HOST_x 3. By defining ${CHOST//-/_}_x else one can set ``cfg.env.CHOST`` in ``wscript`` before loading ``cross_gnu``. Usage: - In your build script:: def configure(cfg): ... for variant in x_variants: setenv(variant) conf.load('cross_gnu') conf.xcheck_host_var('POUET') ... - Then:: CHOST=arm-hardfloat-linux-gnueabi waf configure env arm-hardfloat-linux-gnueabi-CC="clang -..." waf configure CFLAGS=... CHOST=arm-hardfloat-linux-gnueabi HOST_CFLAGS=-g waf configure HOST_CC="clang -..." waf configure This example ``wscript`` compiles to Microchip PIC (xc16-gcc-xyz must be in PATH): .. code:: python from waflib import Configure #from https://gist.github.com/rpuntaie/2bddfb5d7b77db26415ee14371289971 import waf_variants variants='pc fw/variant1 fw/variant2'.split() top = "." 
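# ---- illustrative sketch, not part of the original tool ----
# The named groups in CPPLINT_RE above (filename, linenum, message, category,
# confidence) lost their angle-bracketed names in this copy; '(?P.*)' does not
# even compile. A reconstructed emacs-style pattern plus the severity
# bucketing performed by cpplint_wrapper.write(), as a self-contained sketch:
import re

RE_EMACS_FIXED = re.compile(
	r'(?P<filename>.*):(?P<linenum>\d+): (?P<message>.*) \[(?P<category>.*)\] \[(?P<confidence>\d+)\]')

line = 'foo.cc:42:  Missing space before {  [whitespace/braces] [4]'
m = RE_EMACS_FIXED.match(line)
if m:
	level = int(m.group('confidence'))  # cpplint confidence runs 1 (low) .. 5 (high)
	if level <= 2:
		severity = 'info'
	elif level <= 4:
		severity = 'warning'
	else:
		severity = 'error'
	# write() additionally counts the defect as critical when level >= CPPLINT_BREAK
# ------------------------------------------------------------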
out = "../build" PIC = '33FJ128GP804' #dsPICxxx @Configure.conf def gcc_modifier_xc16(cfg): v = cfg.env v.cprogram_PATTERN = '%s.elf' v.LINKFLAGS_cprogram = ','.join(['-Wl','','','--defsym=__MPLAB_BUILD=0','','--script=p'+PIC+'.gld', '--stack=16','--check-sections','--data-init','--pack-data','--handles','--isr','--no-gc-sections', '--fill-upper=0','--stackguard=16','--no-force-link','--smart-io']) #,'--report-mem']) v.CFLAGS_cprogram=['-mcpu='+PIC,'-omf=elf','-mlarge-code','-msmart-io=1', '-msfr-warn=off','-mno-override-inline','-finline','-Winline'] def configure(cfg): if 'fw' in cfg.variant: #firmware cfg.env.DEST_OS = 'xc16' #cfg.env.CHOST = 'xc16' #works too cfg.load('c cross_gnu') #cfg.env.CHOST becomes ['xc16'] ... else: #configure for pc SW ... def build(bld): if 'fw' in bld.variant: #firmware bld.program(source='maintst.c', target='maintst'); bld(source='maintst.elf', target='maintst.hex', rule="xc16-bin2hex ${SRC} -a -omf=elf") else: #build for pc SW ... """ import os from waflib import Utils, Configure from waflib.Tools import ccroot, gcc try: from shlex import quote except ImportError: from pipes import quote def get_chost_stuff(conf): """ Get the CHOST environment variable contents """ chost = None chost_envar = None if conf.env.CHOST: chost = conf.env.CHOST[0] chost_envar = chost.replace('-', '_') return chost, chost_envar @Configure.conf def xcheck_var(conf, name, wafname=None, cross=False): wafname = wafname or name if wafname in conf.env: value = conf.env[wafname] if isinstance(value, str): value = [value] else: envar = os.environ.get(name) if not envar: return value = Utils.to_list(envar) if envar != '' else [envar] conf.env[wafname] = value if cross: pretty = 'cross-compilation %s' % wafname else: pretty = wafname conf.msg('Will use %s' % pretty, " ".join(quote(x) for x in value)) @Configure.conf def xcheck_host_prog(conf, name, tool, wafname=None): wafname = wafname or name chost, chost_envar = get_chost_stuff(conf) specific = None if chost: specific = os.environ.get('%s_%s' % (chost_envar, name)) if specific: value = Utils.to_list(specific) conf.env[wafname] += value conf.msg('Will use cross-compilation %s from %s_%s' % (name, chost_envar, name), " ".join(quote(x) for x in value)) return else: envar = os.environ.get('HOST_%s' % name) if envar is not None: value = Utils.to_list(envar) conf.env[wafname] = value conf.msg('Will use cross-compilation %s from HOST_%s' % (name, name), " ".join(quote(x) for x in value)) return if conf.env[wafname]: return value = None if chost: value = '%s-%s' % (chost, tool) if value: conf.env[wafname] = value conf.msg('Will use cross-compilation %s from CHOST' % wafname, value) @Configure.conf def xcheck_host_envar(conf, name, wafname=None): wafname = wafname or name chost, chost_envar = get_chost_stuff(conf) specific = None if chost: specific = os.environ.get('%s_%s' % (chost_envar, name)) if specific: value = Utils.to_list(specific) conf.env[wafname] += value conf.msg('Will use cross-compilation %s from %s_%s' \ % (name, chost_envar, name), " ".join(quote(x) for x in value)) return envar = os.environ.get('HOST_%s' % name) if envar is None: return value = Utils.to_list(envar) if envar != '' else [envar] conf.env[wafname] = value conf.msg('Will use cross-compilation %s from HOST_%s' % (name, name), " ".join(quote(x) for x in value)) @Configure.conf def xcheck_host(conf): conf.xcheck_var('CHOST', cross=True) conf.env.CHOST = conf.env.CHOST or [conf.env.DEST_OS] conf.env.DEST_OS = conf.env.CHOST[0].replace('-','_') conf.xcheck_host_prog('CC', 
'gcc') conf.xcheck_host_prog('CXX', 'g++') conf.xcheck_host_prog('LINK_CC', 'gcc') conf.xcheck_host_prog('LINK_CXX', 'g++') conf.xcheck_host_prog('AR', 'ar') conf.xcheck_host_prog('AS', 'as') conf.xcheck_host_prog('LD', 'ld') conf.xcheck_host_envar('CFLAGS') conf.xcheck_host_envar('CXXFLAGS') conf.xcheck_host_envar('LDFLAGS', 'LINKFLAGS') conf.xcheck_host_envar('LIB') conf.xcheck_host_envar('PKG_CONFIG_LIBDIR') conf.xcheck_host_envar('PKG_CONFIG_PATH') if not conf.env.env: conf.env.env = {} conf.env.env.update(os.environ) if conf.env.PKG_CONFIG_LIBDIR: conf.env.env['PKG_CONFIG_LIBDIR'] = conf.env.PKG_CONFIG_LIBDIR[0] if conf.env.PKG_CONFIG_PATH: conf.env.env['PKG_CONFIG_PATH'] = conf.env.PKG_CONFIG_PATH[0] def configure(conf): """ Configuration example for gcc, it will not work for g++/clang/clang++ """ conf.xcheck_host() conf.gcc_common_flags() conf.gcc_modifier_platform() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() tdb-1.4.2/third_party/waf/waflib/extras/cython.py0000660000000000000000000001014513527011455021765 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2010-2015 import re from waflib import Task, Logs from waflib.TaskGen import extension cy_api_pat = re.compile(r'\s*?cdef\s*?(public|api)\w*') re_cyt = re.compile(r""" ^\s* # must begin with some whitespace characters (?:from\s+(\w+)(?:\.\w+)*\s+)? # optionally match "from foo(.baz)" and capture foo c?import\s(\w+|[*]) # require "import bar" and capture bar """, re.M | re.VERBOSE) @extension('.pyx') def add_cython_file(self, node): """ Process a *.pyx* file given in the list of source files. No additional feature is required:: def build(bld): bld(features='c cshlib pyext', source='main.c foo.pyx', target='app') """ ext = '.c' if 'cxx' in self.features: self.env.append_unique('CYTHONFLAGS', '--cplus') ext = '.cc' for x in getattr(self, 'cython_includes', []): # TODO re-use these nodes in "scan" below d = self.path.find_dir(x) if d: self.env.append_unique('CYTHONFLAGS', '-I%s' % d.abspath()) tsk = self.create_task('cython', node, node.change_ext(ext)) self.source += tsk.outputs class cython(Task.Task): run_str = '${CYTHON} ${CYTHONFLAGS} -o ${TGT[0].abspath()} ${SRC}' color = 'GREEN' vars = ['INCLUDES'] """ Rebuild whenever the INCLUDES change. The variables such as CYTHONFLAGS will be appended by the metaclass. """ ext_out = ['.h'] """ The creation of a .h file is known only after the build has begun, so it is not possible to compute a build order just by looking at the task inputs/outputs. """ def runnable_status(self): """ Perform a double-check to add the headers created by cython to the output nodes. The scanner is executed only when the cython task must be executed (optimization). """ ret = super(cython, self).runnable_status() if ret == Task.ASK_LATER: return ret for x in self.generator.bld.raw_deps[self.uid()]: if x.startswith('header:'): self.outputs.append(self.inputs[0].parent.find_or_declare(x.replace('header:', ''))) return super(cython, self).runnable_status() def post_run(self): for x in self.outputs: if x.name.endswith('.h'): if not x.exists(): if Logs.verbose: Logs.warn('Expected %r', x.abspath()) x.write('') return Task.Task.post_run(self) def scan(self): """ Return the dependent files (.pxd) by looking in the include folders. Put the headers to generate in the custom list "bld.raw_deps". 
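# ---- illustrative sketch, not part of the original tools ----
# xcheck_host_prog() in cross_gnu.py above resolves each tool through three
# tiers. A compact sketch of the same lookup order for a hypothetical CHOST
# (no waf context needed):
import os

def resolve_tool(name, tool, chost, environ=None):
	environ = environ if environ is not None else os.environ
	chost_envar = chost.replace('-', '_')
	for key in ('%s_%s' % (chost_envar, name), 'HOST_%s' % name):
		if environ.get(key):
			return environ[key]        # tiers 1 and 2: explicit overrides
	return '%s-%s' % (chost, tool)     # tier 3: derive from CHOST itself

assert resolve_tool('CC', 'gcc', 'arm-linux-gnueabi', {}) == 'arm-linux-gnueabi-gcc'
assert resolve_tool('CC', 'gcc', 'arm-linux-gnueabi', {'HOST_CC': 'clang'}) == 'clang'
# --------------------------------------------------------------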
To inspect the scanne results use:: $ waf clean build --zones=deps """ node = self.inputs[0] txt = node.read() mods = set() for m in re_cyt.finditer(txt): if m.group(1): # matches "from foo import bar" mods.add(m.group(1)) else: mods.add(m.group(2)) Logs.debug('cython: mods %r', mods) incs = getattr(self.generator, 'cython_includes', []) incs = [self.generator.path.find_dir(x) for x in incs] incs.append(node.parent) found = [] missing = [] for x in sorted(mods): for y in incs: k = y.find_resource(x + '.pxd') if k: found.append(k) break else: missing.append(x) # the cython file implicitly depends on a pxd file that might be present implicit = node.parent.find_resource(node.name[:-3] + 'pxd') if implicit: found.append(implicit) Logs.debug('cython: found %r', found) # Now the .h created - store them in bld.raw_deps for later use has_api = False has_public = False for l in txt.splitlines(): if cy_api_pat.match(l): if ' api ' in l: has_api = True if ' public ' in l: has_public = True name = node.name.replace('.pyx', '') if has_api: missing.append('header:%s_api.h' % name) if has_public: missing.append('header:%s.h' % name) return (found, missing) def options(ctx): ctx.add_option('--cython-flags', action='store', default='', help='space separated list of flags to pass to cython') def configure(ctx): if not ctx.env.CC and not ctx.env.CXX: ctx.fatal('Load a C/C++ compiler first') if not ctx.env.PYTHON: ctx.fatal('Load the python tool first!') ctx.find_program('cython', var='CYTHON') if hasattr(ctx.options, 'cython_flags'): ctx.env.CYTHONFLAGS = ctx.options.cython_flags tdb-1.4.2/third_party/waf/waflib/extras/dcc.py0000660000000000000000000000357313444661622021226 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Jérôme Carretero, 2011 (zougloub) from waflib import Options from waflib.Tools import ccroot from waflib.Configure import conf @conf def find_dcc(conf): conf.find_program(['dcc'], var='CC', path_list=getattr(Options.options, 'diabbindir', "")) conf.env.CC_NAME = 'dcc' @conf def find_dld(conf): conf.find_program(['dld'], var='LINK_CC', path_list=getattr(Options.options, 'diabbindir', "")) conf.env.LINK_CC_NAME = 'dld' @conf def find_dar(conf): conf.find_program(['dar'], var='AR', path_list=getattr(Options.options, 'diabbindir', "")) conf.env.AR_NAME = 'dar' conf.env.ARFLAGS = 'rcs' @conf def find_ddump(conf): conf.find_program(['ddump'], var='DDUMP', path_list=getattr(Options.options, 'diabbindir', "")) @conf def dcc_common_flags(conf): v = conf.env v['CC_SRC_F'] = [] v['CC_TGT_F'] = ['-c', '-o'] # linker if not v['LINK_CC']: v['LINK_CC'] = v['CC'] v['CCLNK_SRC_F'] = [] v['CCLNK_TGT_F'] = ['-o'] v['CPPPATH_ST'] = '-I%s' v['DEFINES_ST'] = '-D%s' v['LIB_ST'] = '-l:%s' # template for adding libs v['LIBPATH_ST'] = '-L%s' # template for adding libpaths v['STLIB_ST'] = '-l:%s' v['STLIBPATH_ST'] = '-L%s' v['RPATH_ST'] = '-Wl,-rpath,%s' #v['STLIB_MARKER'] = '-Wl,-Bstatic' # program v['cprogram_PATTERN'] = '%s.elf' # static lib v['LINKFLAGS_cstlib'] = ['-Wl,-Bstatic'] v['cstlib_PATTERN'] = 'lib%s.a' def configure(conf): conf.find_dcc() conf.find_dar() conf.find_dld() conf.find_ddump() conf.dcc_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() def options(opt): """ Add the ``--with-diab-bindir`` command-line options. 
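# ---- illustrative sketch, not part of the original tools ----
# The dependency scanner in cython.py above only needs module names, so the
# behaviour of re_cyt is easy to demonstrate on a toy .pyx source (group 1
# matches the "from X" form, group 2 the plain "import X" / "cimport X" form):
import re

re_cyt_demo = re.compile(r"""
	^\s*                            # line start (re.M)
	(?:from\s+(\w+)(?:\.\w+)*\s+)?  # optional "from foo.bar"
	c?import\s(\w+|[*])             # "import baz" / "cimport baz" / "*"
	""", re.M | re.VERBOSE)

txt = "from libc.math cimport sin\ncimport numpy\nimport os\n"
mods = set(m.group(1) or m.group(2) for m in re_cyt_demo.finditer(txt))
assert mods == {'libc', 'numpy', 'os'}
# --------------------------------------------------------------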
""" opt.add_option('--with-diab-bindir', type='string', dest='diabbindir', help = 'Specify alternate diab bin folder', default="") tdb-1.4.2/third_party/waf/waflib/extras/distnet.py0000660000000000000000000002650513527011455022142 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 """ waf-powered distributed network builds, with a network cache. Caching files from a server has advantages over a NFS/Samba shared folder: - builds are much faster because they use local files - builds just continue to work in case of a network glitch - permissions are much simpler to manage """ import os, urllib, tarfile, re, shutil, tempfile, sys from collections import OrderedDict from waflib import Context, Utils, Logs try: from urllib.parse import urlencode except ImportError: urlencode = urllib.urlencode def safe_urlencode(data): x = urlencode(data) try: x = x.encode('utf-8') except Exception: pass return x try: from urllib.error import URLError except ImportError: from urllib2 import URLError try: from urllib.request import Request, urlopen except ImportError: from urllib2 import Request, urlopen DISTNETCACHE = os.environ.get('DISTNETCACHE', '/tmp/distnetcache') DISTNETSERVER = os.environ.get('DISTNETSERVER', 'http://localhost:8000/cgi-bin/') TARFORMAT = 'w:bz2' TIMEOUT = 60 REQUIRES = 'requires.txt' re_com = re.compile(r'\s*#.*', re.M) def total_version_order(num): lst = num.split('.') template = '%10s' * len(lst) ret = template % tuple(lst) return ret def get_distnet_cache(): return getattr(Context.g_module, 'DISTNETCACHE', DISTNETCACHE) def get_server_url(): return getattr(Context.g_module, 'DISTNETSERVER', DISTNETSERVER) def get_download_url(): return '%s/download.py' % get_server_url() def get_upload_url(): return '%s/upload.py' % get_server_url() def get_resolve_url(): return '%s/resolve.py' % get_server_url() def send_package_name(): out = getattr(Context.g_module, 'out', 'build') pkgfile = '%s/package_to_upload.tarfile' % out return pkgfile class package(Context.Context): fun = 'package' cmd = 'package' def execute(self): try: files = self.files except AttributeError: files = self.files = [] Context.Context.execute(self) pkgfile = send_package_name() if not pkgfile in files: if not REQUIRES in files: files.append(REQUIRES) self.make_tarfile(pkgfile, files, add_to_package=False) def make_tarfile(self, filename, files, **kw): if kw.get('add_to_package', True): self.files.append(filename) with tarfile.open(filename, TARFORMAT) as tar: endname = os.path.split(filename)[-1] endname = endname.split('.')[0] + '/' for x in files: tarinfo = tar.gettarinfo(x, x) tarinfo.uid = tarinfo.gid = 0 tarinfo.uname = tarinfo.gname = 'root' tarinfo.size = os.stat(x).st_size # TODO - more archive creation options? if kw.get('bare', True): tarinfo.name = os.path.split(x)[1] else: tarinfo.name = endname + x # todo, if tuple, then.. Logs.debug('distnet: adding %r to %s', tarinfo.name, filename) with open(x, 'rb') as f: tar.addfile(tarinfo, f) Logs.info('Created %s', filename) class publish(Context.Context): fun = 'publish' cmd = 'publish' def execute(self): if hasattr(Context.g_module, 'publish'): Context.Context.execute(self) mod = Context.g_module rfile = getattr(self, 'rfile', send_package_name()) if not os.path.isfile(rfile): self.fatal('Create the release file with "waf release" first! 
%r' % rfile) fdata = Utils.readf(rfile, m='rb') data = safe_urlencode([('pkgdata', fdata), ('pkgname', mod.APPNAME), ('pkgver', mod.VERSION)]) req = Request(get_upload_url(), data) response = urlopen(req, timeout=TIMEOUT) data = response.read().strip() if sys.hexversion>0x300000f: data = data.decode('utf-8') if data != 'ok': self.fatal('Could not publish the package %r' % data) class constraint(object): def __init__(self, line=''): self.required_line = line self.info = [] line = line.strip() if not line: return lst = line.split(',') if lst: self.pkgname = lst[0] self.required_version = lst[1] for k in lst: a, b, c = k.partition('=') if a and c: self.info.append((a, c)) def __str__(self): buf = [] buf.append(self.pkgname) buf.append(self.required_version) for k in self.info: buf.append('%s=%s' % k) return ','.join(buf) def __repr__(self): return "requires %s-%s" % (self.pkgname, self.required_version) def human_display(self, pkgname, pkgver): return '%s-%s requires %s-%s' % (pkgname, pkgver, self.pkgname, self.required_version) def why(self): ret = [] for x in self.info: if x[0] == 'reason': ret.append(x[1]) return ret def add_reason(self, reason): self.info.append(('reason', reason)) def parse_constraints(text): assert(text is not None) constraints = [] text = re.sub(re_com, '', text) lines = text.splitlines() for line in lines: line = line.strip() if not line: continue constraints.append(constraint(line)) return constraints def list_package_versions(cachedir, pkgname): pkgdir = os.path.join(cachedir, pkgname) try: versions = os.listdir(pkgdir) except OSError: return [] versions.sort(key=total_version_order) versions.reverse() return versions class package_reader(Context.Context): cmd = 'solver' fun = 'solver' def __init__(self, **kw): Context.Context.__init__(self, **kw) self.myproject = getattr(Context.g_module, 'APPNAME', 'project') self.myversion = getattr(Context.g_module, 'VERSION', '1.0') self.cache_constraints = {} self.constraints = [] def compute_dependencies(self, filename=REQUIRES): text = Utils.readf(filename) data = safe_urlencode([('text', text)]) if '--offline' in sys.argv: self.constraints = self.local_resolve(text) else: req = Request(get_resolve_url(), data) try: response = urlopen(req, timeout=TIMEOUT) except URLError as e: Logs.warn('The package server is down! 
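# ---- illustrative usage, not part of the original tool ----
# A requires.txt line handled by the constraint class above is just
# "name,version[,key=value ...]"; a short usage example with hypothetical
# values:
c = constraint('waf-tools,1.5.*,reason=needed by the build scripts')
assert c.pkgname == 'waf-tools'
assert c.required_version == '1.5.*'
assert c.why() == ['needed by the build scripts']
assert str(c) == 'waf-tools,1.5.*,reason=needed by the build scripts'
# ------------------------------------------------------------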
%r', e) self.constraints = self.local_resolve(text) else: ret = response.read() try: ret = ret.decode('utf-8') except Exception: pass self.trace(ret) self.constraints = parse_constraints(ret) self.check_errors() def check_errors(self): errors = False for c in self.constraints: if not c.required_version: errors = True reasons = c.why() if len(reasons) == 1: Logs.error('%s but no matching package could be found in this repository', reasons[0]) else: Logs.error('Conflicts on package %r:', c.pkgname) for r in reasons: Logs.error(' %s', r) if errors: self.fatal('The package requirements cannot be satisfied!') def load_constraints(self, pkgname, pkgver, requires=REQUIRES): try: return self.cache_constraints[(pkgname, pkgver)] except KeyError: text = Utils.readf(os.path.join(get_distnet_cache(), pkgname, pkgver, requires)) ret = parse_constraints(text) self.cache_constraints[(pkgname, pkgver)] = ret return ret def apply_constraint(self, domain, constraint): vname = constraint.required_version.replace('*', '.*') rev = re.compile(vname, re.M) ret = [x for x in domain if rev.match(x)] return ret def trace(self, *k): if getattr(self, 'debug', None): Logs.error(*k) def solve(self, packages_to_versions={}, packages_to_constraints={}, pkgname='', pkgver='', todo=[], done=[]): # breadth first search n_packages_to_versions = dict(packages_to_versions) n_packages_to_constraints = dict(packages_to_constraints) self.trace("calling solve with %r %r %r" % (packages_to_versions, todo, done)) done = done + [pkgname] constraints = self.load_constraints(pkgname, pkgver) self.trace("constraints %r" % constraints) for k in constraints: try: domain = n_packages_to_versions[k.pkgname] except KeyError: domain = list_package_versions(get_distnet_cache(), k.pkgname) self.trace("constraints?") if not k.pkgname in done: todo = todo + [k.pkgname] self.trace("domain before %s -> %s, %r" % (pkgname, k.pkgname, domain)) # apply the constraint domain = self.apply_constraint(domain, k) self.trace("domain after %s -> %s, %r" % (pkgname, k.pkgname, domain)) n_packages_to_versions[k.pkgname] = domain # then store the constraint applied constraints = list(packages_to_constraints.get(k.pkgname, [])) constraints.append((pkgname, pkgver, k)) n_packages_to_constraints[k.pkgname] = constraints if not domain: self.trace("no domain while processing constraint %r from %r %r" % (domain, pkgname, pkgver)) return (n_packages_to_versions, n_packages_to_constraints) # next package on the todo list if not todo: return (n_packages_to_versions, n_packages_to_constraints) n_pkgname = todo[0] n_pkgver = n_packages_to_versions[n_pkgname][0] tmp = dict(n_packages_to_versions) tmp[n_pkgname] = [n_pkgver] self.trace("fixed point %s" % n_pkgname) return self.solve(tmp, n_packages_to_constraints, n_pkgname, n_pkgver, todo[1:], done) def get_results(self): return '\n'.join([str(c) for c in self.constraints]) def solution_to_constraints(self, versions, constraints): solution = [] for p in versions: c = constraint() solution.append(c) c.pkgname = p if versions[p]: c.required_version = versions[p][0] else: c.required_version = '' for (from_pkgname, from_pkgver, c2) in constraints.get(p, ''): c.add_reason(c2.human_display(from_pkgname, from_pkgver)) return solution def local_resolve(self, text): self.cache_constraints[(self.myproject, self.myversion)] = parse_constraints(text) p2v = OrderedDict({self.myproject: [self.myversion]}) (versions, constraints) = self.solve(p2v, {}, self.myproject, self.myversion, []) return self.solution_to_constraints(versions, 
constraints) def download_to_file(self, pkgname, pkgver, subdir, tmp): data = safe_urlencode([('pkgname', pkgname), ('pkgver', pkgver), ('pkgfile', subdir)]) req = urlopen(get_download_url(), data, timeout=TIMEOUT) with open(tmp, 'wb') as f: while True: buf = req.read(8192) if not buf: break f.write(buf) def extract_tar(self, subdir, pkgdir, tmpfile): with tarfile.open(tmpfile) as f: temp = tempfile.mkdtemp(dir=pkgdir) try: f.extractall(temp) os.rename(temp, os.path.join(pkgdir, subdir)) finally: try: shutil.rmtree(temp) except Exception: pass def get_pkg_dir(self, pkgname, pkgver, subdir): pkgdir = os.path.join(get_distnet_cache(), pkgname, pkgver) if not os.path.isdir(pkgdir): os.makedirs(pkgdir) target = os.path.join(pkgdir, subdir) if os.path.exists(target): return target (fd, tmp) = tempfile.mkstemp(dir=pkgdir) try: os.close(fd) self.download_to_file(pkgname, pkgver, subdir, tmp) if subdir == REQUIRES: os.rename(tmp, target) else: self.extract_tar(subdir, pkgdir, tmp) finally: try: os.remove(tmp) except OSError: pass return target def __iter__(self): if not self.constraints: self.compute_dependencies() for x in self.constraints: if x.pkgname == self.myproject: continue yield x def execute(self): self.compute_dependencies() packages = package_reader() def load_tools(ctx, extra): global packages for c in packages: packages.get_pkg_dir(c.pkgname, c.required_version, extra) noarchdir = packages.get_pkg_dir(c.pkgname, c.required_version, 'noarch') for x in os.listdir(noarchdir): if x.startswith('waf_') and x.endswith('.py'): ctx.load([x.rstrip('.py')], tooldir=[noarchdir]) def options(opt): opt.add_option('--offline', action='store_true') packages.execute() load_tools(opt, REQUIRES) def configure(conf): load_tools(conf, conf.variant) def build(bld): load_tools(bld, bld.variant) tdb-1.4.2/third_party/waf/waflib/extras/doxygen.py0000660000000000000000000001624513527011455022145 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: UTF-8 # Thomas Nagy 2008-2010 (ita) """ Doxygen support Variables passed to bld(): * doxyfile -- the Doxyfile to use * doxy_tar -- destination archive for generated documentation (if desired) * install_path -- where to install the documentation * pars -- dictionary overriding doxygen configuration settings When using this tool, the wscript will look like: def options(opt): opt.load('doxygen') def configure(conf): conf.load('doxygen') # check conf.env.DOXYGEN, if it is mandatory def build(bld): if bld.env.DOXYGEN: bld(features="doxygen", doxyfile='Doxyfile', ...) """ import os, os.path, re from collections import OrderedDict from waflib import Task, Utils, Node from waflib.TaskGen import feature DOXY_STR = '"${DOXYGEN}" - ' DOXY_FMTS = 'html latex man rft xml'.split() DOXY_FILE_PATTERNS = '*.' + ' *.'.join(''' c cc cxx cpp c++ java ii ixx ipp i++ inl h hh hxx hpp h++ idl odl cs php php3 inc m mm py f90c cc cxx cpp c++ java ii ixx ipp i++ inl h hh hxx '''.split()) re_rl = re.compile('\\\\\r*\n', re.MULTILINE) re_nl = re.compile('\r*\n', re.M) def parse_doxy(txt): ''' Parses a doxygen file. Returns an ordered dictionary. We cannot return a default dictionary, as the order in which the entries are reported does matter, especially for the '@INCLUDE' lines. 
''' tbl = OrderedDict() txt = re_rl.sub('', txt) lines = re_nl.split(txt) for x in lines: x = x.strip() if not x or x.startswith('#') or x.find('=') < 0: continue if x.find('+=') >= 0: tmp = x.split('+=') key = tmp[0].strip() if key in tbl: tbl[key] += ' ' + '+='.join(tmp[1:]).strip() else: tbl[key] = '+='.join(tmp[1:]).strip() else: tmp = x.split('=') tbl[tmp[0].strip()] = '='.join(tmp[1:]).strip() return tbl class doxygen(Task.Task): vars = ['DOXYGEN', 'DOXYFLAGS'] color = 'BLUE' def runnable_status(self): ''' self.pars are populated in runnable_status - because this function is being run *before* both self.pars "consumers" - scan() and run() set output_dir (node) for the output ''' for x in self.run_after: if not x.hasrun: return Task.ASK_LATER if not getattr(self, 'pars', None): txt = self.inputs[0].read() self.pars = parse_doxy(txt) if self.pars.get('OUTPUT_DIRECTORY'): # Use the path parsed from the Doxyfile as an absolute path output_node = self.inputs[0].parent.get_bld().make_node(self.pars['OUTPUT_DIRECTORY']) else: # If no OUTPUT_PATH was specified in the Doxyfile, build path from the Doxyfile name + '.doxy' output_node = self.inputs[0].parent.get_bld().make_node(self.inputs[0].name + '.doxy') output_node.mkdir() self.pars['OUTPUT_DIRECTORY'] = output_node.abspath() # Override with any parameters passed to the task generator if getattr(self.generator, 'pars', None): for k, v in self.generator.pars.items(): self.pars[k] = v self.doxy_inputs = getattr(self, 'doxy_inputs', []) if not self.pars.get('INPUT'): self.doxy_inputs.append(self.inputs[0].parent) else: for i in self.pars.get('INPUT').split(): if os.path.isabs(i): node = self.generator.bld.root.find_node(i) else: node = self.inputs[0].parent.find_node(i) if not node: self.generator.bld.fatal('Could not find the doxygen input %r' % i) self.doxy_inputs.append(node) if not getattr(self, 'output_dir', None): bld = self.generator.bld # Output path is always an absolute path as it was transformed above. 
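# ---- illustrative usage, not part of the original tool ----
# parse_doxy() above folds backslash-continued lines, drops '#' comments and
# merges '+=' entries into the previous value; for example, on a hypothetical
# Doxyfile snippet:
doxy_txt = '''# a comment line, skipped
INPUT = src \\
include
INPUT += docs
OUTPUT_DIRECTORY = build/doxy
'''
tbl_demo = parse_doxy(doxy_txt)
assert tbl_demo['OUTPUT_DIRECTORY'] == 'build/doxy'
assert tbl_demo['INPUT'] == 'src include docs'
# ------------------------------------------------------------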
self.output_dir = bld.root.find_dir(self.pars['OUTPUT_DIRECTORY']) self.signature() ret = Task.Task.runnable_status(self) if ret == Task.SKIP_ME: # in case the files were removed self.add_install() return ret def scan(self): exclude_patterns = self.pars.get('EXCLUDE_PATTERNS','').split() exclude_patterns = [pattern.replace('*/', '**/') for pattern in exclude_patterns] file_patterns = self.pars.get('FILE_PATTERNS','').split() if not file_patterns: file_patterns = DOXY_FILE_PATTERNS.split() if self.pars.get('RECURSIVE') == 'YES': file_patterns = ["**/%s" % pattern for pattern in file_patterns] nodes = [] names = [] for node in self.doxy_inputs: if os.path.isdir(node.abspath()): for m in node.ant_glob(incl=file_patterns, excl=exclude_patterns): nodes.append(m) else: nodes.append(node) return (nodes, names) def run(self): dct = self.pars.copy() code = '\n'.join(['%s = %s' % (x, dct[x]) for x in self.pars]) code = code.encode() # for python 3 #fmt = DOXY_STR % (self.inputs[0].parent.abspath()) cmd = Utils.subst_vars(DOXY_STR, self.env) env = self.env.env or None proc = Utils.subprocess.Popen(cmd, shell=True, stdin=Utils.subprocess.PIPE, env=env, cwd=self.inputs[0].parent.abspath()) proc.communicate(code) return proc.returncode def post_run(self): nodes = self.output_dir.ant_glob('**/*', quiet=True) for x in nodes: self.generator.bld.node_sigs[x] = self.uid() self.add_install() return Task.Task.post_run(self) def add_install(self): nodes = self.output_dir.ant_glob('**/*', quiet=True) self.outputs += nodes if getattr(self.generator, 'install_path', None): if not getattr(self.generator, 'doxy_tar', None): self.generator.add_install_files(install_to=self.generator.install_path, install_from=self.outputs, postpone=False, cwd=self.output_dir, relative_trick=True) class tar(Task.Task): "quick tar creation" run_str = '${TAR} ${TAROPTS} ${TGT} ${SRC}' color = 'RED' after = ['doxygen'] def runnable_status(self): for x in getattr(self, 'input_tasks', []): if not x.hasrun: return Task.ASK_LATER if not getattr(self, 'tar_done_adding', None): # execute this only once self.tar_done_adding = True for x in getattr(self, 'input_tasks', []): self.set_inputs(x.outputs) if not self.inputs: return Task.SKIP_ME return Task.Task.runnable_status(self) def __str__(self): tgt_str = ' '.join([a.path_from(a.ctx.launch_node()) for a in self.outputs]) return '%s: %s\n' % (self.__class__.__name__, tgt_str) @feature('doxygen') def process_doxy(self): if not getattr(self, 'doxyfile', None): self.bld.fatal('no doxyfile variable specified??') node = self.doxyfile if not isinstance(node, Node.Node): node = self.path.find_resource(node) if not node: self.bld.fatal('doxygen file %s not found' % self.doxyfile) # the task instance dsk = self.create_task('doxygen', node) if getattr(self, 'doxy_tar', None): tsk = self.create_task('tar') tsk.input_tasks = [dsk] tsk.set_outputs(self.path.find_or_declare(self.doxy_tar)) if self.doxy_tar.endswith('bz2'): tsk.env['TAROPTS'] = ['cjf'] elif self.doxy_tar.endswith('gz'): tsk.env['TAROPTS'] = ['czf'] else: tsk.env['TAROPTS'] = ['cf'] if getattr(self, 'install_path', None): self.add_install_files(install_to=self.install_path, install_from=tsk.outputs) def configure(conf): ''' Check if doxygen and tar commands are present in the system If the commands are present, then conf.env.DOXYGEN and conf.env.TAR variables will be set. Detection can be controlled by setting DOXYGEN and TAR environmental variables. 
''' conf.find_program('doxygen', var='DOXYGEN', mandatory=False) conf.find_program('tar', var='TAR', mandatory=False) tdb-1.4.2/third_party/waf/waflib/extras/dpapi.py0000660000000000000000000000556613444661622021576 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Matt Clarkson, 2012 ''' DPAPI access library (http://msdn.microsoft.com/en-us/library/ms995355.aspx) This file uses code originally created by Crusher Joe: http://article.gmane.org/gmane.comp.python.ctypes/420 And modified by Wayne Koorts: http://stackoverflow.com/questions/463832/using-dpapi-with-python ''' from ctypes import windll, byref, cdll, Structure, POINTER, c_char, c_buffer from ctypes.wintypes import DWORD from waflib.Configure import conf LocalFree = windll.kernel32.LocalFree memcpy = cdll.msvcrt.memcpy CryptProtectData = windll.crypt32.CryptProtectData CryptUnprotectData = windll.crypt32.CryptUnprotectData CRYPTPROTECT_UI_FORBIDDEN = 0x01 try: extra_entropy = 'cl;ad13 \0al;323kjd #(adl;k$#ajsd'.encode('ascii') except AttributeError: extra_entropy = 'cl;ad13 \0al;323kjd #(adl;k$#ajsd' class DATA_BLOB(Structure): _fields_ = [ ('cbData', DWORD), ('pbData', POINTER(c_char)) ] def get_data(blob_out): cbData = int(blob_out.cbData) pbData = blob_out.pbData buffer = c_buffer(cbData) memcpy(buffer, pbData, cbData) LocalFree(pbData) return buffer.raw @conf def dpapi_encrypt_data(self, input_bytes, entropy = extra_entropy): ''' Encrypts data and returns byte string :param input_bytes: The data to be encrypted :type input_bytes: String or Bytes :param entropy: Extra entropy to add to the encryption process (optional) :type entropy: String or Bytes ''' if not isinstance(input_bytes, bytes) or not isinstance(entropy, bytes): self.fatal('The inputs to dpapi must be bytes') buffer_in = c_buffer(input_bytes, len(input_bytes)) buffer_entropy = c_buffer(entropy, len(entropy)) blob_in = DATA_BLOB(len(input_bytes), buffer_in) blob_entropy = DATA_BLOB(len(entropy), buffer_entropy) blob_out = DATA_BLOB() if CryptProtectData(byref(blob_in), 'python_data', byref(blob_entropy), None, None, CRYPTPROTECT_UI_FORBIDDEN, byref(blob_out)): return get_data(blob_out) else: self.fatal('Failed to decrypt data') @conf def dpapi_decrypt_data(self, encrypted_bytes, entropy = extra_entropy): ''' Decrypts data and returns byte string :param encrypted_bytes: The encrypted data :type encrypted_bytes: Bytes :param entropy: Extra entropy to add to the encryption process (optional) :type entropy: String or Bytes ''' if not isinstance(encrypted_bytes, bytes) or not isinstance(entropy, bytes): self.fatal('The inputs to dpapi must be bytes') buffer_in = c_buffer(encrypted_bytes, len(encrypted_bytes)) buffer_entropy = c_buffer(entropy, len(entropy)) blob_in = DATA_BLOB(len(encrypted_bytes), buffer_in) blob_entropy = DATA_BLOB(len(entropy), buffer_entropy) blob_out = DATA_BLOB() if CryptUnprotectData(byref(blob_in), None, byref(blob_entropy), None, None, CRYPTPROTECT_UI_FORBIDDEN, byref(blob_out)): return get_data(blob_out) else: self.fatal('Failed to decrypt data') tdb-1.4.2/third_party/waf/waflib/extras/eclipse.py0000660000000000000000000003777613444661622022135 0ustar rootroot00000000000000#! 
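# ---- illustrative usage, not part of the original tool ----
# The two @conf methods in dpapi.py above are meant to be called from a
# configure step on Windows; a hypothetical wscript fragment round-tripping
# a secret through the Win32 DPAPI (default entropy on both calls):
def configure(conf):
	conf.load('dpapi')
	blob = conf.dpapi_encrypt_data('hunter2'.encode('ascii'))
	assert conf.dpapi_decrypt_data(blob) == 'hunter2'.encode('ascii')
# ------------------------------------------------------------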
/usr/bin/env python # encoding: utf-8 # Eclipse CDT 5.0 generator for Waf # Richard Quirk 2009-1011 (New BSD License) # Thomas Nagy 2011 (ported to Waf 1.6) """ Usage: def options(opt): opt.load('eclipse') $ waf configure eclipse """ import sys, os from waflib import Utils, Logs, Context, Build, TaskGen, Scripting, Errors, Node from xml.dom.minidom import Document STANDARD_INCLUDES = [ '/usr/local/include', '/usr/include' ] oe_cdt = 'org.eclipse.cdt' cdt_mk = oe_cdt + '.make.core' cdt_core = oe_cdt + '.core' cdt_bld = oe_cdt + '.build.core' extbuilder_dir = '.externalToolBuilders' extbuilder_name = 'Waf_Builder.launch' class eclipse(Build.BuildContext): cmd = 'eclipse' fun = Scripting.default_cmd def execute(self): """ Entry point """ self.restore() if not self.all_envs: self.load_envs() self.recurse([self.run_dir]) appname = getattr(Context.g_module, Context.APPNAME, os.path.basename(self.srcnode.abspath())) self.create_cproject(appname, pythonpath=self.env['ECLIPSE_PYTHON_PATH']) # Helper to dump the XML document content to XML with UTF-8 encoding def write_conf_to_xml(self, filename, document): self.srcnode.make_node(filename).write(document.toprettyxml(encoding='UTF-8'), flags='wb') def create_cproject(self, appname, workspace_includes=[], pythonpath=[]): """ Create the Eclipse CDT .project and .cproject files @param appname The name that will appear in the Project Explorer @param build The BuildContext object to extract includes from @param workspace_includes Optional project includes to prevent "Unresolved Inclusion" errors in the Eclipse editor @param pythonpath Optional project specific python paths """ hasc = hasjava = haspython = False source_dirs = [] cpppath = self.env['CPPPATH'] javasrcpath = [] javalibpath = [] includes = STANDARD_INCLUDES if sys.platform != 'win32': cc = self.env.CC or self.env.CXX if cc: cmd = cc + ['-xc++', '-E', '-Wp,-v', '-'] try: gccout = self.cmd_and_log(cmd, output=Context.STDERR, quiet=Context.BOTH, input='\n'.encode()).splitlines() except Errors.WafError: pass else: includes = [] for ipath in gccout: if ipath.startswith(' /'): includes.append(ipath[1:]) cpppath += includes Logs.warn('Generating Eclipse CDT project files') for g in self.groups: for tg in g: if not isinstance(tg, TaskGen.task_gen): continue tg.post() # Add local Python modules paths to configuration so object resolving will work in IDE # This may also contain generated files (ie. pyqt5 or protoc) that get picked from build if 'py' in tg.features: pypath = tg.path.relpath() py_installfrom = getattr(tg, 'install_from', None) if isinstance(py_installfrom, Node.Node): pypath = py_installfrom.path_from(self.root.make_node(self.top_dir)) if pypath not in pythonpath: pythonpath.append(pypath) haspython = True # Add Java source directories so object resolving works in IDE # This may also contain generated files (ie. 
protoc) that get picked from build if 'javac' in tg.features: java_src = tg.path.relpath() java_srcdir = getattr(tg.javac_task, 'srcdir', None) if java_srcdir: if isinstance(java_srcdir, Node.Node): java_srcdir = [java_srcdir] for x in Utils.to_list(java_srcdir): x = x.path_from(self.root.make_node(self.top_dir)) if x not in javasrcpath: javasrcpath.append(x) else: if java_src not in javasrcpath: javasrcpath.append(java_src) hasjava = True # Check if there are external dependencies and add them as external jar so they will be resolved by Eclipse usedlibs=getattr(tg, 'use', []) for x in Utils.to_list(usedlibs): for cl in Utils.to_list(tg.env['CLASSPATH_'+x]): if cl not in javalibpath: javalibpath.append(cl) if not getattr(tg, 'link_task', None): continue features = Utils.to_list(getattr(tg, 'features', '')) is_cc = 'c' in features or 'cxx' in features incnodes = tg.to_incnodes(tg.to_list(getattr(tg, 'includes', [])) + tg.env['INCLUDES']) for p in incnodes: path = p.path_from(self.srcnode) if (path.startswith("/")): cpppath.append(path) else: workspace_includes.append(path) if is_cc and path not in source_dirs: source_dirs.append(path) hasc = True waf_executable = os.path.abspath(sys.argv[0]) project = self.impl_create_project(sys.executable, appname, hasc, hasjava, haspython, waf_executable) self.write_conf_to_xml('.project', project) if hasc: project = self.impl_create_cproject(sys.executable, waf_executable, appname, workspace_includes, cpppath, source_dirs) self.write_conf_to_xml('.cproject', project) if haspython: project = self.impl_create_pydevproject(sys.path, pythonpath) self.write_conf_to_xml('.pydevproject', project) if hasjava: project = self.impl_create_javaproject(javasrcpath, javalibpath) self.write_conf_to_xml('.classpath', project) def impl_create_project(self, executable, appname, hasc, hasjava, haspython, waf_executable): doc = Document() projectDescription = doc.createElement('projectDescription') self.add(doc, projectDescription, 'name', appname) self.add(doc, projectDescription, 'comment') self.add(doc, projectDescription, 'projects') buildSpec = self.add(doc, projectDescription, 'buildSpec') buildCommand = self.add(doc, buildSpec, 'buildCommand') self.add(doc, buildCommand, 'triggers', 'clean,full,incremental,') arguments = self.add(doc, buildCommand, 'arguments') dictionaries = {} # If CDT is present, instruct this one to call waf as it is more flexible (separate build/clean ...) 
if hasc: self.add(doc, buildCommand, 'name', oe_cdt + '.managedbuilder.core.genmakebuilder') # the default make-style targets are overwritten by the .cproject values dictionaries = { cdt_mk + '.contents': cdt_mk + '.activeConfigSettings', cdt_mk + '.enableAutoBuild': 'false', cdt_mk + '.enableCleanBuild': 'true', cdt_mk + '.enableFullBuild': 'true', } else: # Otherwise for Java/Python an external builder tool is created that will call waf build self.add(doc, buildCommand, 'name', 'org.eclipse.ui.externaltools.ExternalToolBuilder') dictionaries = { 'LaunchConfigHandle': '/%s/%s'%(extbuilder_dir, extbuilder_name), } # The definition is in a separate directory XML file try: os.mkdir(extbuilder_dir) except OSError: pass # Ignore error if already exists # Populate here the external builder XML calling waf builder = Document() launchConfiguration = doc.createElement('launchConfiguration') launchConfiguration.setAttribute('type', 'org.eclipse.ui.externaltools.ProgramBuilderLaunchConfigurationType') self.add(doc, launchConfiguration, 'booleanAttribute', {'key': 'org.eclipse.debug.ui.ATTR_LAUNCH_IN_BACKGROUND', 'value': 'false'}) self.add(doc, launchConfiguration, 'booleanAttribute', {'key': 'org.eclipse.ui.externaltools.ATTR_TRIGGERS_CONFIGURED', 'value': 'true'}) self.add(doc, launchConfiguration, 'stringAttribute', {'key': 'org.eclipse.ui.externaltools.ATTR_LOCATION', 'value': waf_executable}) self.add(doc, launchConfiguration, 'stringAttribute', {'key': 'org.eclipse.ui.externaltools.ATTR_RUN_BUILD_KINDS', 'value': 'full,incremental,'}) self.add(doc, launchConfiguration, 'stringAttribute', {'key': 'org.eclipse.ui.externaltools.ATTR_TOOL_ARGUMENTS', 'value': 'build'}) self.add(doc, launchConfiguration, 'stringAttribute', {'key': 'org.eclipse.ui.externaltools.ATTR_WORKING_DIRECTORY', 'value': '${project_loc}'}) builder.appendChild(launchConfiguration) # And write the XML to the file references before self.write_conf_to_xml('%s%s%s'%(extbuilder_dir, os.path.sep, extbuilder_name), builder) for k, v in dictionaries.items(): self.addDictionary(doc, arguments, k, v) natures = self.add(doc, projectDescription, 'natures') if hasc: nature_list = """ core.ccnature managedbuilder.core.ScannerConfigNature managedbuilder.core.managedBuildNature core.cnature """.split() for n in nature_list: self.add(doc, natures, 'nature', oe_cdt + '.' 
+ n) if haspython: self.add(doc, natures, 'nature', 'org.python.pydev.pythonNature') if hasjava: self.add(doc, natures, 'nature', 'org.eclipse.jdt.core.javanature') doc.appendChild(projectDescription) return doc def impl_create_cproject(self, executable, waf_executable, appname, workspace_includes, cpppath, source_dirs=[]): doc = Document() doc.appendChild(doc.createProcessingInstruction('fileVersion', '4.0.0')) cconf_id = cdt_core + '.default.config.1' cproject = doc.createElement('cproject') storageModule = self.add(doc, cproject, 'storageModule', {'moduleId': cdt_core + '.settings'}) cconf = self.add(doc, storageModule, 'cconfiguration', {'id':cconf_id}) storageModule = self.add(doc, cconf, 'storageModule', {'buildSystemId': oe_cdt + '.managedbuilder.core.configurationDataProvider', 'id': cconf_id, 'moduleId': cdt_core + '.settings', 'name': 'Default'}) self.add(doc, storageModule, 'externalSettings') extensions = self.add(doc, storageModule, 'extensions') extension_list = """ VCErrorParser MakeErrorParser GCCErrorParser GASErrorParser GLDErrorParser """.split() self.add(doc, extensions, 'extension', {'id': cdt_core + '.ELF', 'point':cdt_core + '.BinaryParser'}) for e in extension_list: self.add(doc, extensions, 'extension', {'id': cdt_core + '.' + e, 'point':cdt_core + '.ErrorParser'}) storageModule = self.add(doc, cconf, 'storageModule', {'moduleId': 'cdtBuildSystem', 'version': '4.0.0'}) config = self.add(doc, storageModule, 'configuration', {'artifactName': appname, 'id': cconf_id, 'name': 'Default', 'parent': cdt_bld + '.prefbase.cfg'}) folderInfo = self.add(doc, config, 'folderInfo', {'id': cconf_id+'.', 'name': '/', 'resourcePath': ''}) toolChain = self.add(doc, folderInfo, 'toolChain', {'id': cdt_bld + '.prefbase.toolchain.1', 'name': 'No ToolChain', 'resourceTypeBasedDiscovery': 'false', 'superClass': cdt_bld + '.prefbase.toolchain'}) self.add(doc, toolChain, 'targetPlatform', {'binaryParser': 'org.eclipse.cdt.core.ELF', 'id': cdt_bld + '.prefbase.toolchain.1', 'name': ''}) waf_build = '"%s" %s'%(waf_executable, eclipse.fun) waf_clean = '"%s" clean'%(waf_executable) self.add(doc, toolChain, 'builder', {'autoBuildTarget': waf_build, 'command': executable, 'enableAutoBuild': 'false', 'cleanBuildTarget': waf_clean, 'enableIncrementalBuild': 'true', 'id': cdt_bld + '.settings.default.builder.1', 'incrementalBuildTarget': waf_build, 'managedBuildOn': 'false', 'name': 'Gnu Make Builder', 'superClass': cdt_bld + '.settings.default.builder'}) tool_index = 1; for tool_name in ("Assembly", "GNU C++", "GNU C"): tool = self.add(doc, toolChain, 'tool', {'id': cdt_bld + '.settings.holder.' + str(tool_index), 'name': tool_name, 'superClass': cdt_bld + '.settings.holder'}) if cpppath or workspace_includes: incpaths = cdt_bld + '.settings.holder.incpaths' option = self.add(doc, tool, 'option', {'id': incpaths + '.' + str(tool_index), 'name': 'Include Paths', 'superClass': incpaths, 'valueType': 'includePath'}) for i in workspace_includes: self.add(doc, option, 'listOptionValue', {'builtIn': 'false', 'value': '"${workspace_loc:/%s/%s}"'%(appname, i)}) for i in cpppath: self.add(doc, option, 'listOptionValue', {'builtIn': 'false', 'value': '"%s"'%(i)}) if tool_name == "GNU C++" or tool_name == "GNU C": self.add(doc,tool,'inputType',{ 'id':'org.eclipse.cdt.build.core.settings.holder.inType.' 
+ str(tool_index), \ 'languageId':'org.eclipse.cdt.core.gcc' if tool_name == "GNU C" else 'org.eclipse.cdt.core.g++','languageName':tool_name, \ 'sourceContentType':'org.eclipse.cdt.core.cSource,org.eclipse.cdt.core.cHeader', \ 'superClass':'org.eclipse.cdt.build.core.settings.holder.inType' }) tool_index += 1 if source_dirs: sourceEntries = self.add(doc, config, 'sourceEntries') for i in source_dirs: self.add(doc, sourceEntries, 'entry', {'excluding': i, 'flags': 'VALUE_WORKSPACE_PATH|RESOLVED', 'kind': 'sourcePath', 'name': ''}) self.add(doc, sourceEntries, 'entry', { 'flags': 'VALUE_WORKSPACE_PATH|RESOLVED', 'kind': 'sourcePath', 'name': i}) storageModule = self.add(doc, cconf, 'storageModule', {'moduleId': cdt_mk + '.buildtargets'}) buildTargets = self.add(doc, storageModule, 'buildTargets') def addTargetWrap(name, runAll): return self.addTarget(doc, buildTargets, executable, name, '"%s" %s'%(waf_executable, name), runAll) addTargetWrap('configure', True) addTargetWrap('dist', False) addTargetWrap('install', False) addTargetWrap('check', False) storageModule = self.add(doc, cproject, 'storageModule', {'moduleId': 'cdtBuildSystem', 'version': '4.0.0'}) self.add(doc, storageModule, 'project', {'id': '%s.null.1'%appname, 'name': appname}) doc.appendChild(cproject) return doc def impl_create_pydevproject(self, system_path, user_path): # create a pydevproject file doc = Document() doc.appendChild(doc.createProcessingInstruction('eclipse-pydev', 'version="1.0"')) pydevproject = doc.createElement('pydev_project') prop = self.add(doc, pydevproject, 'pydev_property', 'python %d.%d'%(sys.version_info[0], sys.version_info[1])) prop.setAttribute('name', 'org.python.pydev.PYTHON_PROJECT_VERSION') prop = self.add(doc, pydevproject, 'pydev_property', 'Default') prop.setAttribute('name', 'org.python.pydev.PYTHON_PROJECT_INTERPRETER') # add waf's paths wafadmin = [p for p in system_path if p.find('wafadmin') != -1] if wafadmin: prop = self.add(doc, pydevproject, 'pydev_pathproperty', {'name':'org.python.pydev.PROJECT_EXTERNAL_SOURCE_PATH'}) for i in wafadmin: self.add(doc, prop, 'path', i) if user_path: prop = self.add(doc, pydevproject, 'pydev_pathproperty', {'name':'org.python.pydev.PROJECT_SOURCE_PATH'}) for i in user_path: self.add(doc, prop, 'path', '/${PROJECT_DIR_NAME}/'+i) doc.appendChild(pydevproject) return doc def impl_create_javaproject(self, javasrcpath, javalibpath): # create a .classpath file for java usage doc = Document() javaproject = doc.createElement('classpath') if javasrcpath: for i in javasrcpath: self.add(doc, javaproject, 'classpathentry', {'kind': 'src', 'path': i}) if javalibpath: for i in javalibpath: self.add(doc, javaproject, 'classpathentry', {'kind': 'lib', 'path': i}) self.add(doc, javaproject, 'classpathentry', {'kind': 'con', 'path': 'org.eclipse.jdt.launching.JRE_CONTAINER'}) self.add(doc, javaproject, 'classpathentry', {'kind': 'output', 'path': self.bldnode.name }) doc.appendChild(javaproject) return doc def addDictionary(self, doc, parent, k, v): dictionary = self.add(doc, parent, 'dictionary') self.add(doc, dictionary, 'key', k) self.add(doc, dictionary, 'value', v) return dictionary def addTarget(self, doc, buildTargets, executable, name, buildTarget, runAllBuilders=True): target = self.add(doc, buildTargets, 'target', {'name': name, 'path': '', 'targetID': oe_cdt + '.build.MakeTargetBuilder'}) self.add(doc, target, 'buildCommand', executable) self.add(doc, target, 'buildArguments', None) self.add(doc, target, 'buildTarget', buildTarget) self.add(doc, target, 
'stopOnError', 'true') self.add(doc, target, 'useDefaultCommand', 'false') self.add(doc, target, 'runAllBuilders', str(runAllBuilders).lower()) def add(self, doc, parent, tag, value = None): el = doc.createElement(tag) if (value): if type(value) == type(str()): el.appendChild(doc.createTextNode(value)) elif type(value) == type(dict()): self.setAttributes(el, value) parent.appendChild(el) return el def setAttributes(self, node, attrs): for k, v in attrs.items(): node.setAttribute(k, v) tdb-1.4.2/third_party/waf/waflib/extras/erlang.py0000660000000000000000000000667313527011455021744 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2010 (ita) # Przemyslaw Rzepecki, 2016 """ Erlang support """ import re from waflib import Task, TaskGen from waflib.TaskGen import feature, after_method, before_method # to load the method "to_incnodes" below from waflib.Tools import ccroot # Those flags are required by the Erlang VM to execute/evaluate code in # non-interactive mode. It is used in this tool to create Erlang modules # documentation and run unit tests. The user can pass additional arguments to the # 'erl' command with ERL_FLAGS environment variable. EXEC_NON_INTERACTIVE = ['-noshell', '-noinput', '-eval'] def configure(conf): conf.find_program('erlc', var='ERLC') conf.find_program('erl', var='ERL') conf.add_os_flags('ERLC_FLAGS') conf.add_os_flags('ERL_FLAGS') conf.env.ERLC_DEF_PATTERN = '-D%s' conf.env.ERLC_INC_PATTERN = '-I%s' @TaskGen.extension('.erl') def process_erl_node(self, node): tsk = self.create_task('erl', node, node.change_ext('.beam')) tsk.erlc_incnodes = [tsk.outputs[0].parent] + self.to_incnodes(self.includes) tsk.env.append_value('ERLC_INCPATHS', [x.abspath() for x in tsk.erlc_incnodes]) tsk.env.append_value('ERLC_DEFINES', self.to_list(getattr(self, 'defines', []))) tsk.env.append_value('ERLC_FLAGS', self.to_list(getattr(self, 'flags', []))) tsk.cwd = tsk.outputs[0].parent class erl(Task.Task): color = 'GREEN' run_str = '${ERLC} ${ERL_FLAGS} ${ERLC_INC_PATTERN:ERLC_INCPATHS} ${ERLC_DEF_PATTERN:ERLC_DEFINES} ${SRC}' def scan(task): node = task.inputs[0] deps = [] scanned = set([]) nodes_to_scan = [node] for n in nodes_to_scan: if n.abspath() in scanned: continue for i in re.findall(r'-include\("(.*)"\)\.', n.read()): for d in task.erlc_incnodes: r = d.find_node(i) if r: deps.append(r) nodes_to_scan.append(r) break scanned.add(n.abspath()) return (deps, []) @TaskGen.extension('.beam') def process(self, node): pass class erl_test(Task.Task): color = 'BLUE' run_str = '${ERL} ${ERL_FLAGS} ${ERL_TEST_FLAGS}' @feature('eunit') @after_method('process_source') def add_erl_test_run(self): test_modules = [t.outputs[0] for t in self.tasks] test_task = self.create_task('erl_test') test_task.set_inputs(self.source + test_modules) test_task.cwd = test_modules[0].parent test_task.env.append_value('ERL_FLAGS', self.to_list(getattr(self, 'flags', []))) test_list = ", ".join([m.change_ext("").path_from(test_task.cwd)+":test()" for m in test_modules]) test_flag = 'halt(case lists:all(fun(Elem) -> Elem == ok end, [%s]) of true -> 0; false -> 1 end).' 
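# ---- illustrative sketch, not part of the original tool ----
# The add()/setAttributes() helpers in eclipse.py above are thin wrappers
# over the standard xml.dom.minidom API; the whole project-file generator
# reduces to this pattern:
from xml.dom.minidom import Document

doc = Document()
root = doc.createElement('projectDescription')
doc.appendChild(root)
name = doc.createElement('name')
name.appendChild(doc.createTextNode('demo'))
root.appendChild(name)
xml_bytes = doc.toprettyxml(encoding='UTF-8')  # what write_conf_to_xml() writes out
# ------------------------------------------------------------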
% test_list test_task.env.append_value('ERL_TEST_FLAGS', EXEC_NON_INTERACTIVE) test_task.env.append_value('ERL_TEST_FLAGS', test_flag) class edoc(Task.Task): color = 'BLUE' run_str = "${ERL} ${ERL_FLAGS} ${ERL_DOC_FLAGS}" def keyword(self): return 'Generating edoc' @feature('edoc') @before_method('process_source') def add_edoc_task(self): # do not process source, it would create double erl->beam task self.meths.remove('process_source') e = self.path.find_resource(self.source) t = e.change_ext('.html') png = t.parent.make_node('erlang.png') css = t.parent.make_node('stylesheet.css') tsk = self.create_task('edoc', e, [t, png, css]) tsk.cwd = tsk.outputs[0].parent tsk.env.append_value('ERL_DOC_FLAGS', EXEC_NON_INTERACTIVE) tsk.env.append_value('ERL_DOC_FLAGS', 'edoc:files(["%s"]), halt(0).' % tsk.inputs[0].abspath()) # TODO the above can break if a file path contains '"' tdb-1.4.2/third_party/waf/waflib/extras/fast_partial.py0000660000000000000000000003527013527011455023140 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2017-2018 (ita) """ A system for fast partial rebuilds Creating a large amount of task objects up front can take some time. By making a few assumptions, it is possible to avoid posting creating task objects for targets that are already up-to-date. On a silly benchmark the gain observed for 1M tasks can be 5m->10s for a single file change. Usage:: def options(opt): opt.load('fast_partial') Assumptions: * Mostly for C/C++/Fortran targets with link tasks (object-only targets are not handled) try it in the folder generated by utils/genbench.py * For full project builds: no --targets and no pruning from subfolders * The installation phase is ignored * `use=` dependencies are specified up front even across build groups * Task generator source files are not obtained from globs Implementation details: * The first layer obtains file timestamps to recalculate file hashes only when necessary (similar to md5_tstamp); the timestamps are then stored in a dedicated pickle file * A second layer associates each task generator to a file set to help detecting changes. Task generators are to create their tasks only when the related files have been modified. A specific db file is created to store such data (5m -> 1m10) * A third layer binds build context proxies onto task generators, replacing the default context. 
While loading data for the full build uses more memory (4GB -> 9GB), partial builds are then much faster (1m10 -> 13s) * A fourth layer enables a 2-level cache on file signatures to reduce the size of the main pickle file (13s -> 10s) """ import os from waflib import Build, Context, Errors, Logs, Task, TaskGen, Utils from waflib.TaskGen import feature, after_method, taskgen_method import waflib.Node DONE = 0 DIRTY = 1 NEEDED = 2 SKIPPABLE = ['cshlib', 'cxxshlib', 'cstlib', 'cxxstlib', 'cprogram', 'cxxprogram'] TSTAMP_DB = '.wafpickle_tstamp_db_file' SAVED_ATTRS = 'root node_sigs task_sigs imp_sigs raw_deps node_deps'.split() class bld_proxy(object): def __init__(self, bld): object.__setattr__(self, 'bld', bld) object.__setattr__(self, 'node_class', type('Nod3', (waflib.Node.Node,), {})) self.node_class.__module__ = 'waflib.Node' self.node_class.ctx = self object.__setattr__(self, 'root', self.node_class('', None)) for x in SAVED_ATTRS: if x != 'root': object.__setattr__(self, x, {}) self.fix_nodes() def __setattr__(self, name, value): bld = object.__getattribute__(self, 'bld') setattr(bld, name, value) def __delattr__(self, name): bld = object.__getattribute__(self, 'bld') delattr(bld, name) def __getattribute__(self, name): try: return object.__getattribute__(self, name) except AttributeError: bld = object.__getattribute__(self, 'bld') return getattr(bld, name) def __call__(self, *k, **kw): return self.bld(*k, **kw) def fix_nodes(self): for x in ('srcnode', 'path', 'bldnode'): node = self.root.find_dir(getattr(self.bld, x).abspath()) object.__setattr__(self, x, node) def set_key(self, store_key): object.__setattr__(self, 'store_key', store_key) def fix_tg_path(self, *tgs): # changing Node objects on task generators is possible # yet, all Node objects must belong to the same parent for tg in tgs: tg.path = self.root.make_node(tg.path.abspath()) def restore(self): dbfn = os.path.join(self.variant_dir, Context.DBFILE + self.store_key) Logs.debug('rev_use: reading %s', dbfn) try: data = Utils.readf(dbfn, 'rb') except (EnvironmentError, EOFError): # handle missing file/empty file Logs.debug('rev_use: Could not load the build cache %s (missing)', dbfn) else: try: waflib.Node.pickle_lock.acquire() waflib.Node.Nod3 = self.node_class try: data = Build.cPickle.loads(data) except Exception as e: Logs.debug('rev_use: Could not pickle the build cache %s: %r', dbfn, e) else: for x in SAVED_ATTRS: object.__setattr__(self, x, data.get(x, {})) finally: waflib.Node.pickle_lock.release() self.fix_nodes() def store(self): data = {} for x in Build.SAVED_ATTRS: data[x] = getattr(self, x) db = os.path.join(self.variant_dir, Context.DBFILE + self.store_key) try: waflib.Node.pickle_lock.acquire() waflib.Node.Nod3 = self.node_class x = Build.cPickle.dumps(data, Build.PROTOCOL) finally: waflib.Node.pickle_lock.release() Logs.debug('rev_use: storing %s', db) Utils.writef(db + '.tmp', x, m='wb') try: st = os.stat(db) os.remove(db) if not Utils.is_win32: os.chown(db + '.tmp', st.st_uid, st.st_gid) except (AttributeError, OSError): pass os.rename(db + '.tmp', db) class bld(Build.BuildContext): def __init__(self, **kw): super(bld, self).__init__(**kw) self.hashes_md5_tstamp = {} def __call__(self, *k, **kw): # this is one way of doing it, one could use a task generator method too bld = kw['bld'] = bld_proxy(self) ret = TaskGen.task_gen(*k, **kw) self.task_gen_cache_names = {} self.add_to_group(ret, group=kw.get('group')) ret.bld = bld bld.set_key(ret.path.abspath().replace(os.sep, '') + str(ret.idx)) return ret def 
is_dirty(self): return True def store_tstamps(self): # Called after a build is finished # For each task generator, record all files involved in task objects # optimization: done only if there was something built do_store = False try: f_deps = self.f_deps except AttributeError: f_deps = self.f_deps = {} self.f_tstamps = {} allfiles = set() for g in self.groups: for tg in g: try: staleness = tg.staleness except AttributeError: staleness = DIRTY if staleness != DIRTY: # DONE case: there was nothing built # NEEDED case: the tg was brought in because of 'use' propagation # but nothing really changed for them, there may be incomplete # tasks (object files) and in this case it is best to let the next build # figure out if an input/output file changed continue do_cache = False for tsk in tg.tasks: if tsk.hasrun == Task.SUCCESS: do_cache = True pass elif tsk.hasrun == Task.SKIPPED: pass else: # one failed task, clear the cache for this tg try: del f_deps[(tg.path.abspath(), tg.idx)] except KeyError: pass else: # just store the new state because there is a change do_store = True # skip the rest because there is no valid cache possible break else: if not do_cache: # all skipped, but is there anything in cache? try: f_deps[(tg.path.abspath(), tg.idx)] except KeyError: # probably cleared because a wscript file changed # store it do_cache = True if do_cache: # there was a rebuild, store the data structure too tg.bld.store() # all tasks skipped but no cache # or a successful task build do_store = True st = set() for tsk in tg.tasks: st.update(tsk.inputs) st.update(self.node_deps.get(tsk.uid(), [])) # TODO do last/when loading the tgs? lst = [] for k in ('wscript', 'wscript_build'): n = tg.path.find_node(k) if n: n.get_bld_sig() lst.append(n.abspath()) lst.extend(sorted(x.abspath() for x in st)) allfiles.update(lst) f_deps[(tg.path.abspath(), tg.idx)] = lst for x in allfiles: # f_tstamps has everything, while md5_tstamp can be relatively empty on partial builds self.f_tstamps[x] = self.hashes_md5_tstamp[x][0] if do_store: dbfn = os.path.join(self.variant_dir, TSTAMP_DB) Logs.debug('rev_use: storing %s', dbfn) dbfn_tmp = dbfn + '.tmp' x = Build.cPickle.dumps([self.f_tstamps, f_deps], Build.PROTOCOL) Utils.writef(dbfn_tmp, x, m='wb') os.rename(dbfn_tmp, dbfn) Logs.debug('rev_use: stored %s', dbfn) def store(self): self.store_tstamps() if self.producer.dirty: Build.BuildContext.store(self) def compute_needed_tgs(self): # assume the 'use' keys are not modified during the build phase dbfn = os.path.join(self.variant_dir, TSTAMP_DB) Logs.debug('rev_use: Loading %s', dbfn) try: data = Utils.readf(dbfn, 'rb') except (EnvironmentError, EOFError): Logs.debug('rev_use: Could not load the build cache %s (missing)', dbfn) self.f_deps = {} self.f_tstamps = {} else: try: self.f_tstamps, self.f_deps = Build.cPickle.loads(data) except Exception as e: Logs.debug('rev_use: Could not pickle the build cache %s: %r', dbfn, e) self.f_deps = {} self.f_tstamps = {} else: Logs.debug('rev_use: Loaded %s', dbfn) # 1. obtain task generators that contain rebuilds # 2. obtain the 'use' graph and its dual stales = set() reverse_use_map = Utils.defaultdict(list) use_map = Utils.defaultdict(list) for g in self.groups: for tg in g: if tg.is_stale(): stales.add(tg) try: lst = tg.use = Utils.to_list(tg.use) except AttributeError: pass else: for x in lst: try: xtg = self.get_tgen_by_name(x) except Errors.WafError: pass else: use_map[tg].append(xtg) reverse_use_map[xtg].append(tg) Logs.debug('rev_use: found %r stale tgs', len(stales)) # 3. 
dfs to post downstream tg as stale visited = set() def mark_down(tg): if tg in visited: return visited.add(tg) Logs.debug('rev_use: marking down %r as stale', tg.name) tg.staleness = DIRTY for x in reverse_use_map[tg]: mark_down(x) for tg in stales: mark_down(tg) # 4. dfs to find ancestors tg to mark as needed self.needed_tgs = needed_tgs = set() def mark_needed(tg): if tg in needed_tgs: return needed_tgs.add(tg) if tg.staleness == DONE: Logs.debug('rev_use: marking up %r as needed', tg.name) tg.staleness = NEEDED for x in use_map[tg]: mark_needed(x) for xx in visited: mark_needed(xx) # so we have the whole tg trees to post in the set "needed" # load their build trees for tg in needed_tgs: tg.bld.restore() tg.bld.fix_tg_path(tg) # the stale ones should be fully build, while the needed ones # may skip a few tasks, see create_compiled_task and apply_link_after below Logs.debug('rev_use: amount of needed task gens: %r', len(needed_tgs)) def post_group(self): # assumption: we can ignore the folder/subfolders cuts def tgpost(tg): try: f = tg.post except AttributeError: pass else: f() if not self.targets or self.targets == '*': for tg in self.groups[self.current_group]: # this can cut quite a lot of tg objects if tg in self.needed_tgs: tgpost(tg) else: # default implementation return Build.BuildContext.post_group() def get_build_iterator(self): if not self.targets or self.targets == '*': self.compute_needed_tgs() return Build.BuildContext.get_build_iterator(self) @taskgen_method def is_stale(self): # assume no globs self.staleness = DIRTY # 1. the case of always stale targets if getattr(self, 'always_stale', False): return True # 2. check if the db file exists db = os.path.join(self.bld.variant_dir, Context.DBFILE) try: dbstat = os.stat(db).st_mtime except OSError: Logs.debug('rev_use: must post %r because this is a clean build') return True # 3. check if the configuration changed if os.stat(self.bld.bldnode.find_node('c4che/build.config.py').abspath()).st_mtime > dbstat: Logs.debug('rev_use: must post %r because the configuration has changed', self.name) return True # 3.a any tstamp data? try: f_deps = self.bld.f_deps except AttributeError: Logs.debug('rev_use: must post %r because there is no f_deps', self.name) return True # 4. check if this is the first build (no cache) try: lst = f_deps[(self.path.abspath(), self.idx)] except KeyError: Logs.debug('rev_use: must post %r because there it has no cached data', self.name) return True try: cache = self.bld.cache_tstamp_rev_use except AttributeError: cache = self.bld.cache_tstamp_rev_use = {} # 5. 
check the timestamp of each dependency files listed is unchanged f_tstamps = self.bld.f_tstamps for x in lst: try: old_ts = f_tstamps[x] except KeyError: Logs.debug('rev_use: must post %r because %r is not in cache', self.name, x) return True try: try: ts = cache[x] except KeyError: ts = cache[x] = os.stat(x).st_mtime except OSError: del f_deps[(self.path.abspath(), self.idx)] Logs.debug('rev_use: must post %r because %r does not exist anymore', self.name, x) return True else: if ts != old_ts: Logs.debug('rev_use: must post %r because the timestamp on %r changed %r %r', self.name, x, old_ts, ts) return True self.staleness = DONE return False @taskgen_method def create_compiled_task(self, name, node): # skip the creation of object files # assumption: object-only targets are not skippable if self.staleness == NEEDED: # only libraries/programs can skip object files for x in SKIPPABLE: if x in self.features: return None out = '%s.%d.o' % (node.name, self.idx) task = self.create_task(name, node, node.parent.find_or_declare(out)) try: self.compiled_tasks.append(task) except AttributeError: self.compiled_tasks = [task] return task @feature(*SKIPPABLE) @after_method('apply_link') def apply_link_after(self): # cprogram/cxxprogram might be unnecessary if self.staleness != NEEDED: return for tsk in self.tasks: tsk.hasrun = Task.SKIPPED def path_from(self, node): # handle nodes of distinct types if node.ctx is not self.ctx: node = self.ctx.root.make_node(node.abspath()) return self.default_path_from(node) waflib.Node.Node.default_path_from = waflib.Node.Node.path_from waflib.Node.Node.path_from = path_from def h_file(self): # similar to md5_tstamp.py, but with 2-layer cache # global_cache for the build context common for all task generators # local_cache for the build context proxy (one by task generator) # # the global cache is not persistent # the local cache is persistent and meant for partial builds # # assume all calls are made from a single thread # filename = self.abspath() st = os.stat(filename) global_cache = self.ctx.bld.hashes_md5_tstamp local_cache = self.ctx.hashes_md5_tstamp if filename in global_cache: # value already calculated in this build cval = global_cache[filename] # the value in global cache is assumed to be calculated once # reverifying it could cause task generators # to get distinct tstamp values, thus missing rebuilds local_cache[filename] = cval return cval[1] if filename in local_cache: cval = local_cache[filename] if cval[0] == st.st_mtime: # correct value from a previous build # put it in the global cache global_cache[filename] = cval return cval[1] ret = Utils.h_file(filename) local_cache[filename] = global_cache[filename] = (st.st_mtime, ret) return ret waflib.Node.Node.h_file = h_file tdb-1.4.2/third_party/waf/waflib/extras/fc_bgxlf.py0000660000000000000000000000132613444661622022241 0ustar rootroot00000000000000#! 
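# The h_file() override above is the heart of the fourth layer: a two-level
# (st_mtime, digest) cache in which the global level lives only for the
# current build run and the proxy level is persisted between runs. A minimal
# standalone sketch of the same idea follows; 'cached_file_hash' and the two
# dictionaries are illustrative names, not part of the waf API.
import hashlib, os

global_cache = {}  # recomputed once per build run
local_cache = {}   # persisted across runs in the real tool

def cached_file_hash(filename):
	st_mtime = os.stat(filename).st_mtime
	if filename in global_cache:
		# already computed during this run; reuse it verbatim
		local_cache[filename] = global_cache[filename]
		return global_cache[filename][1]
	if filename in local_cache and local_cache[filename][0] == st_mtime:
		# timestamp unchanged since a previous run: trust the stored digest
		global_cache[filename] = local_cache[filename]
		return local_cache[filename][1]
	with open(filename, 'rb') as f:
		digest = hashlib.md5(f.read()).hexdigest()
	local_cache[filename] = global_cache[filename] = (st_mtime, digest)
	return digest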
/usr/bin/env python
# encoding: utf-8
# harald at klimachs.de

from waflib.Tools import fc, fc_config, fc_scan
from waflib.Configure import conf
from waflib.Tools.compiler_fc import fc_compiler
fc_compiler['linux'].insert(0, 'fc_bgxlf')

@conf
def find_bgxlf(conf):
	fc = conf.find_program(['bgxlf2003_r','bgxlf2003'], var='FC')
	conf.get_xlf_version(fc)
	conf.env.FC_NAME = 'BGXLF'

@conf
def bg_flags(self):
	self.env.SONAME_ST = ''
	self.env.FCSHLIB_MARKER = ''
	self.env.FCSTLIB_MARKER = ''
	self.env.FCFLAGS_fcshlib = ['-fPIC']
	self.env.LINKFLAGS_fcshlib = ['-G', '-Wl,-bexpfull']

def configure(conf):
	conf.find_bgxlf()
	conf.find_ar()
	conf.fc_flags()
	conf.fc_add_flags()
	conf.xlf_flags()
	conf.bg_flags()
tdb-1.4.2/third_party/waf/waflib/extras/fc_cray.py0000660000000000000000000000264613527011455022076 0ustar rootroot00000000000000#! /usr/bin/env python
# encoding: utf-8
# harald at klimachs.de

import re
from waflib.Tools import fc, fc_config, fc_scan
from waflib.Configure import conf
from waflib.Tools.compiler_fc import fc_compiler
fc_compiler['linux'].append('fc_cray')

@conf
def find_crayftn(conf):
	"""Find the Cray fortran compiler (will look in the environment variable 'FC')"""
	fc = conf.find_program(['crayftn'], var='FC')
	conf.get_crayftn_version(fc)
	conf.env.FC_NAME = 'CRAY'
	conf.env.FC_MOD_CAPITALIZATION = 'UPPER.mod'

@conf
def crayftn_flags(conf):
	v = conf.env
	v['_FCMODOUTFLAGS'] = ['-em', '-J.'] # enable module files and put them in the current directory
	v['FCFLAGS_DEBUG'] = ['-m1'] # more verbose compiler warnings
	v['FCFLAGS_fcshlib'] = ['-h pic']
	v['LINKFLAGS_fcshlib'] = ['-h shared']
	v['FCSTLIB_MARKER'] = '-h static'
	v['FCSHLIB_MARKER'] = '-h dynamic'

@conf
def get_crayftn_version(conf, fc):
	version_re = re.compile(r"Cray Fortran\s*:\s*Version\s*(?P<major>\d*)\.(?P<minor>\d*)", re.I).search
	cmd = fc + ['-V']
	out,err = fc_config.getoutput(conf, cmd, stdin=False)
	if out:
		match = version_re(out)
	else:
		match = version_re(err)
	if not match:
		conf.fatal('Could not determine the Cray Fortran compiler version.')
	k = match.groupdict()
	conf.env['FC_VERSION'] = (k['major'], k['minor'])

def configure(conf):
	conf.find_crayftn()
	conf.find_ar()
	conf.fc_flags()
	conf.fc_add_flags()
	conf.crayftn_flags()
tdb-1.4.2/third_party/waf/waflib/extras/fc_nag.py0000660000000000000000000000276013444661622021707 0ustar rootroot00000000000000#!
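# fc_bgxlf and fc_cray above follow the recipe shared by every fc_* tool in
# this directory: register the tool name with compiler_fc, provide @conf
# helpers that locate the binary and set FC/FC_NAME, and chain the generic
# Fortran configuration steps in configure(). A skeleton of that pattern
# follows; 'fc_acme' and the 'acmeftn' executable are hypothetical names used
# only to illustrate the shape of such a tool.
from waflib.Configure import conf
from waflib.Tools import fc, fc_config, fc_scan
from waflib.Tools.compiler_fc import fc_compiler
fc_compiler['linux'].append('fc_acme')  # let compiler_fc try this detector on Linux

@conf
def find_acmeftn(conf):
	conf.find_program(['acmeftn'], var='FC')  # aborts configuration if absent
	conf.env.FC_NAME = 'ACME'

def configure(conf):
	conf.find_acmeftn()
	conf.find_ar()
	conf.fc_flags()
	conf.fc_add_flags()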
/usr/bin/env python # encoding: utf-8 # harald at klimachs.de import re from waflib import Utils from waflib.Tools import fc,fc_config,fc_scan from waflib.Configure import conf from waflib.Tools.compiler_fc import fc_compiler fc_compiler['linux'].insert(0, 'fc_nag') @conf def find_nag(conf): """Find the NAG Fortran Compiler (will look in the environment variable 'FC')""" fc = conf.find_program(['nagfor'], var='FC') conf.get_nag_version(fc) conf.env.FC_NAME = 'NAG' conf.env.FC_MOD_CAPITALIZATION = 'lower' @conf def nag_flags(conf): v = conf.env v.FCFLAGS_DEBUG = ['-C=all'] v.FCLNK_TGT_F = ['-o', ''] v.FC_TGT_F = ['-c', '-o', ''] @conf def nag_modifier_platform(conf): dest_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform() nag_modifier_func = getattr(conf, 'nag_modifier_' + dest_os, None) if nag_modifier_func: nag_modifier_func() @conf def get_nag_version(conf, fc): """Get the NAG compiler version""" version_re = re.compile(r"^NAG Fortran Compiler *Release *(?P\d*)\.(?P\d*)", re.M).search cmd = fc + ['-V'] out, err = fc_config.getoutput(conf,cmd,stdin=False) if out: match = version_re(out) if not match: match = version_re(err) else: match = version_re(err) if not match: conf.fatal('Could not determine the NAG version.') k = match.groupdict() conf.env['FC_VERSION'] = (k['major'], k['minor']) def configure(conf): conf.find_nag() conf.find_ar() conf.fc_flags() conf.fc_add_flags() conf.nag_flags() conf.nag_modifier_platform() tdb-1.4.2/third_party/waf/waflib/extras/fc_nec.py0000660000000000000000000000317313527011455021701 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # harald at klimachs.de import re from waflib.Tools import fc, fc_config, fc_scan from waflib.Configure import conf from waflib.Tools.compiler_fc import fc_compiler fc_compiler['linux'].append('fc_nec') @conf def find_sxfc(conf): """Find the NEC fortran compiler (will look in the environment variable 'FC')""" fc = conf.find_program(['sxf90','sxf03'], var='FC') conf.get_sxfc_version(fc) conf.env.FC_NAME = 'NEC' conf.env.FC_MOD_CAPITALIZATION = 'lower' @conf def sxfc_flags(conf): v = conf.env v['_FCMODOUTFLAGS'] = [] # enable module files and put them in the current directory v['FCFLAGS_DEBUG'] = [] # more verbose compiler warnings v['FCFLAGS_fcshlib'] = [] v['LINKFLAGS_fcshlib'] = [] v['FCSTLIB_MARKER'] = '' v['FCSHLIB_MARKER'] = '' @conf def get_sxfc_version(conf, fc): version_re = re.compile(r"FORTRAN90/SX\s*Version\s*(?P\d*)\.(?P\d*)", re.I).search cmd = fc + ['-V'] out,err = fc_config.getoutput(conf, cmd, stdin=False) if out: match = version_re(out) else: match = version_re(err) if not match: version_re=re.compile(r"NEC Fortran 2003 Compiler for\s*(?P\S*)\s*\(c\)\s*(?P\d*)",re.I).search if out: match = version_re(out) else: match = version_re(err) if not match: conf.fatal('Could not determine the NEC Fortran compiler version.') k = match.groupdict() conf.env['FC_VERSION'] = (k['major'], k['minor']) def configure(conf): conf.find_sxfc() conf.find_program('sxar',var='AR') conf.add_os_flags('ARFLAGS') if not conf.env.ARFLAGS: conf.env.ARFLAGS=['rcs'] conf.fc_flags() conf.fc_add_flags() conf.sxfc_flags() tdb-1.4.2/third_party/waf/waflib/extras/fc_nfort.py0000660000000000000000000000243013527011455022257 0ustar rootroot00000000000000#! 
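# fc_nag and fc_nec above both recover the compiler version by running the
# binary with '-V' and matching the banner against a named-group regular
# expression. The snippet below isolates that technique; the banner string is
# an invented sample, only the pattern mirrors the NAG probe.
import re

banner = 'NAG Fortran Compiler Release 7.1(Hanzomon)'  # hypothetical '-V' output
version_re = re.compile(r"NAG Fortran Compiler *Release *(?P<major>\d*)\.(?P<minor>\d*)", re.M).search
match = version_re(banner)
if match:
	k = match.groupdict()
	print((k['major'], k['minor']))  # ('7', '1'), the tuple shape stored in FC_VERSION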
/usr/bin/env python
# encoding: utf-8
# Detection of the NEC Fortran compiler for Aurora Tsubasa

import re
from waflib.Tools import fc,fc_config,fc_scan
from waflib.Configure import conf
from waflib.Tools.compiler_fc import fc_compiler
fc_compiler['linux'].append('fc_nfort')

@conf
def find_nfort(conf):
	fc=conf.find_program(['nfort'],var='FC')
	conf.get_nfort_version(fc)
	conf.env.FC_NAME='NFORT'
	conf.env.FC_MOD_CAPITALIZATION='lower'

@conf
def nfort_flags(conf):
	v=conf.env
	v['_FCMODOUTFLAGS']=[]
	v['FCFLAGS_DEBUG']=[]
	v['FCFLAGS_fcshlib']=[]
	v['LINKFLAGS_fcshlib']=[]
	v['FCSTLIB_MARKER']=''
	v['FCSHLIB_MARKER']=''

@conf
def get_nfort_version(conf,fc):
	version_re=re.compile(r"nfort\s*\(NFORT\)\s*(?P<major>\d+)\.(?P<minor>\d+)\.",re.I).search
	cmd=fc+['--version']
	out,err=fc_config.getoutput(conf,cmd,stdin=False)
	if out:
		match=version_re(out)
	else:
		match=version_re(err)
	if not match:
		# no usable version banner: abort the configuration
		conf.fatal('Could not determine the NEC NFORT Fortran compiler version.')
	else:
		k=match.groupdict()
		conf.env['FC_VERSION']=(k['major'],k['minor'])

def configure(conf):
	conf.find_nfort()
	conf.find_program('nar',var='AR')
	conf.add_os_flags('ARFLAGS')
	if not conf.env.ARFLAGS:
		conf.env.ARFLAGS=['rcs']
	conf.fc_flags()
	conf.fc_add_flags()
	conf.nfort_flags()
tdb-1.4.2/third_party/waf/waflib/extras/fc_open64.py0000660000000000000000000000274613444661622022251 0ustar rootroot00000000000000#! /usr/bin/env python
# encoding: utf-8
# harald at klimachs.de

import re
from waflib import Utils
from waflib.Tools import fc,fc_config,fc_scan
from waflib.Configure import conf
from waflib.Tools.compiler_fc import fc_compiler
fc_compiler['linux'].insert(0, 'fc_open64')

@conf
def find_openf95(conf):
	"""Find the Open64 Fortran Compiler (will look in the environment variable 'FC')"""
	fc = conf.find_program(['openf95', 'openf90'], var='FC')
	conf.get_open64_version(fc)
	conf.env.FC_NAME = 'OPEN64'
	conf.env.FC_MOD_CAPITALIZATION = 'UPPER.mod'

@conf
def openf95_flags(conf):
	v = conf.env
	v['FCFLAGS_DEBUG'] = ['-fullwarn']

@conf
def openf95_modifier_platform(conf):
	dest_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform()
	openf95_modifier_func = getattr(conf, 'openf95_modifier_' + dest_os, None)
	if openf95_modifier_func:
		openf95_modifier_func()

@conf
def get_open64_version(conf, fc):
	"""Get the Open64 compiler version"""
	version_re = re.compile(r"Open64 Compiler Suite: *Version *(?P<major>\d*)\.(?P<minor>\d*)", re.I).search
	cmd = fc + ['-version']
	out, err = fc_config.getoutput(conf,cmd,stdin=False)
	if out:
		match = version_re(out)
	else:
		match = version_re(err)
	if not match:
		conf.fatal('Could not determine the Open64 version.')
	k = match.groupdict()
	conf.env['FC_VERSION'] = (k['major'], k['minor'])

def configure(conf):
	conf.find_openf95()
	conf.find_ar()
	conf.fc_flags()
	conf.fc_add_flags()
	conf.openf95_flags()
	conf.openf95_modifier_platform()
tdb-1.4.2/third_party/waf/waflib/extras/fc_pgfortran.py0000660000000000000000000000336413444661622023145 0ustar rootroot00000000000000#!
/usr/bin/env python # encoding: utf-8 # harald at klimachs.de import re from waflib.Tools import fc, fc_config, fc_scan from waflib.Configure import conf from waflib.Tools.compiler_fc import fc_compiler fc_compiler['linux'].append('fc_pgfortran') @conf def find_pgfortran(conf): """Find the PGI fortran compiler (will look in the environment variable 'FC')""" fc = conf.find_program(['pgfortran', 'pgf95', 'pgf90'], var='FC') conf.get_pgfortran_version(fc) conf.env.FC_NAME = 'PGFC' @conf def pgfortran_flags(conf): v = conf.env v['FCFLAGS_fcshlib'] = ['-shared'] v['FCFLAGS_DEBUG'] = ['-Minform=inform', '-Mstandard'] # why not v['FCSTLIB_MARKER'] = '-Bstatic' v['FCSHLIB_MARKER'] = '-Bdynamic' v['SONAME_ST'] = '-soname %s' @conf def get_pgfortran_version(conf,fc): version_re = re.compile(r"The Portland Group", re.I).search cmd = fc + ['-V'] out,err = fc_config.getoutput(conf, cmd, stdin=False) if out: match = version_re(out) else: match = version_re(err) if not match: conf.fatal('Could not verify PGI signature') cmd = fc + ['-help=variable'] out,err = fc_config.getoutput(conf, cmd, stdin=False) if out.find('COMPVER')<0: conf.fatal('Could not determine the compiler type') k = {} prevk = '' out = out.splitlines() for line in out: lst = line.partition('=') if lst[1] == '=': key = lst[0].rstrip() if key == '': key = prevk val = lst[2].rstrip() k[key] = val else: prevk = line.partition(' ')[0] def isD(var): return var in k def isT(var): return var in k and k[var]!='0' conf.env['FC_VERSION'] = (k['COMPVER'].split('.')) def configure(conf): conf.find_pgfortran() conf.find_ar() conf.fc_flags() conf.fc_add_flags() conf.pgfortran_flags() tdb-1.4.2/third_party/waf/waflib/extras/fc_solstudio.py0000660000000000000000000000315213444661622023163 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # harald at klimachs.de import re from waflib import Utils from waflib.Tools import fc,fc_config,fc_scan from waflib.Configure import conf from waflib.Tools.compiler_fc import fc_compiler fc_compiler['linux'].append('fc_solstudio') @conf def find_solstudio(conf): """Find the Solaris Studio compiler (will look in the environment variable 'FC')""" fc = conf.find_program(['sunf95', 'f95', 'sunf90', 'f90'], var='FC') conf.get_solstudio_version(fc) conf.env.FC_NAME = 'SOL' @conf def solstudio_flags(conf): v = conf.env v['FCFLAGS_fcshlib'] = ['-Kpic'] v['FCFLAGS_DEBUG'] = ['-w3'] v['LINKFLAGS_fcshlib'] = ['-G'] v['FCSTLIB_MARKER'] = '-Bstatic' v['FCSHLIB_MARKER'] = '-Bdynamic' v['SONAME_ST'] = '-h %s' @conf def solstudio_modifier_platform(conf): dest_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform() solstudio_modifier_func = getattr(conf, 'solstudio_modifier_' + dest_os, None) if solstudio_modifier_func: solstudio_modifier_func() @conf def get_solstudio_version(conf, fc): """Get the compiler version""" version_re = re.compile(r"Sun Fortran 95 *(?P\d*)\.(?P\d*)", re.I).search cmd = fc + ['-V'] out, err = fc_config.getoutput(conf,cmd,stdin=False) if out: match = version_re(out) else: match = version_re(err) if not match: conf.fatal('Could not determine the Sun Studio Fortran version.') k = match.groupdict() conf.env['FC_VERSION'] = (k['major'], k['minor']) def configure(conf): conf.find_solstudio() conf.find_ar() conf.fc_flags() conf.fc_add_flags() conf.solstudio_flags() conf.solstudio_modifier_platform() tdb-1.4.2/third_party/waf/waflib/extras/fc_xlf.py0000660000000000000000000000311713444661622021730 0ustar rootroot00000000000000#! 
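# get_pgfortran_version() above cannot rely on a version banner alone, so it
# asks for 'pgfortran -help=variable' and folds the 'KEY =value' lines into a
# dict with str.partition(). A reduced sketch of that parsing follows; the
# sample output is invented for illustration.
sample = 'COMPVER =19.10\nTarget  =x86-64\n'  # hypothetical '-help=variable' output
k = {}
for line in sample.splitlines():
	lst = line.partition('=')
	if lst[1] == '=':
		k[lst[0].rstrip()] = lst[2].rstrip()
print(k['COMPVER'].split('.'))  # ['19', '10'], the value stored in FC_VERSION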
/usr/bin/env python # encoding: utf-8 # harald at klimachs.de import re from waflib import Utils,Errors from waflib.Tools import fc,fc_config,fc_scan from waflib.Configure import conf from waflib.Tools.compiler_fc import fc_compiler fc_compiler['aix'].insert(0, 'fc_xlf') @conf def find_xlf(conf): """Find the xlf program (will look in the environment variable 'FC')""" fc = conf.find_program(['xlf2003_r', 'xlf2003', 'xlf95_r', 'xlf95', 'xlf90_r', 'xlf90', 'xlf_r', 'xlf'], var='FC') conf.get_xlf_version(fc) conf.env.FC_NAME='XLF' @conf def xlf_flags(conf): v = conf.env v['FCDEFINES_ST'] = '-WF,-D%s' v['FCFLAGS_fcshlib'] = ['-qpic=small'] v['FCFLAGS_DEBUG'] = ['-qhalt=w'] v['LINKFLAGS_fcshlib'] = ['-Wl,-shared'] @conf def xlf_modifier_platform(conf): dest_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform() xlf_modifier_func = getattr(conf, 'xlf_modifier_' + dest_os, None) if xlf_modifier_func: xlf_modifier_func() @conf def get_xlf_version(conf, fc): """Get the compiler version""" cmd = fc + ['-qversion'] try: out, err = conf.cmd_and_log(cmd, output=0) except Errors.WafError: conf.fatal('Could not find xlf %r' % cmd) for v in (r"IBM XL Fortran.* V(?P\d*)\.(?P\d*)",): version_re = re.compile(v, re.I).search match = version_re(out or err) if match: k = match.groupdict() conf.env['FC_VERSION'] = (k['major'], k['minor']) break else: conf.fatal('Could not determine the XLF version.') def configure(conf): conf.find_xlf() conf.find_ar() conf.fc_flags() conf.fc_add_flags() conf.xlf_flags() conf.xlf_modifier_platform() tdb-1.4.2/third_party/waf/waflib/extras/file_to_object.py0000660000000000000000000000646513444661622023447 0ustar rootroot00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- # Tool to embed file into objects __author__ = __maintainer__ = "Jérôme Carretero " __copyright__ = "Jérôme Carretero, 2014" """ This tool allows to embed file contents in object files (.o). It is not exactly portable, and the file contents are reachable using various non-portable fashions. The goal here is to provide a functional interface to the embedding of file data in objects. See the ``playground/embedded_resources`` example for an example. Usage:: bld( name='pipeline', # ^ Reference this in use="..." for things using the generated code features='file_to_object', source='some.file', # ^ Name of the file to embed in binary section. ) Known issues: - Destination is named like source, with extension renamed to .o eg. 
some.file -> some.o """ import os from waflib import Task, TaskGen, Errors def filename_c_escape(x): return x.replace("\\", "\\\\") class file_to_object_s(Task.Task): color = 'CYAN' vars = ['DEST_CPU', 'DEST_BINFMT'] def run(self): name = [] for i, x in enumerate(self.inputs[0].name): if x.isalnum(): name.append(x) else: name.append('_') file = self.inputs[0].abspath() size = os.path.getsize(file) if self.env.DEST_CPU in ('x86_64', 'ia', 'aarch64'): unit = 'quad' align = 8 elif self.env.DEST_CPU in ('x86','arm', 'thumb', 'm68k'): unit = 'long' align = 4 else: raise Errors.WafError("Unsupported DEST_CPU, please report bug!") file = filename_c_escape(file) name = "_binary_" + "".join(name) rodata = ".section .rodata" if self.env.DEST_BINFMT == "mac-o": name = "_" + name rodata = ".section __TEXT,__const" with open(self.outputs[0].abspath(), 'w') as f: f.write(\ """ .global %(name)s_start .global %(name)s_end .global %(name)s_size %(rodata)s %(name)s_start: .incbin "%(file)s" %(name)s_end: .align %(align)d %(name)s_size: .%(unit)s 0x%(size)x """ % locals()) class file_to_object_c(Task.Task): color = 'CYAN' def run(self): name = [] for i, x in enumerate(self.inputs[0].name): if x.isalnum(): name.append(x) else: name.append('_') file = self.inputs[0].abspath() size = os.path.getsize(file) name = "_binary_" + "".join(name) data = self.inputs[0].read('rb') lines, line = [], [] for idx_byte, byte in enumerate(data): line.append(byte) if len(line) > 15 or idx_byte == size-1: lines.append(", ".join(("0x%02x" % ord(x)) for x in line)) line = [] data = ",\n ".join(lines) self.outputs[0].write(\ """ unsigned long %(name)s_size = %(size)dL; char const %(name)s_start[] = { %(data)s }; char const %(name)s_end[] = {}; """ % locals()) @TaskGen.feature('file_to_object') @TaskGen.before_method('process_source') def tg_file_to_object(self): bld = self.bld sources = self.to_nodes(self.source) targets = [] for src in sources: if bld.env.F2O_METHOD == ["asm"]: tgt = src.parent.find_or_declare(src.name + '.f2o.s') tsk = self.create_task('file_to_object_s', src, tgt) tsk.cwd = src.parent.abspath() # verify else: tgt = src.parent.find_or_declare(src.name + '.f2o.c') tsk = self.create_task('file_to_object_c', src, tgt) tsk.cwd = src.parent.abspath() # verify targets.append(tgt) self.source = targets def configure(conf): conf.load('gas') conf.env.F2O_METHOD = ["c"] tdb-1.4.2/third_party/waf/waflib/extras/fluid.py0000660000000000000000000000153613444661622021575 0ustar rootroot00000000000000#!/usr/bin/python # encoding: utf-8 # Grygoriy Fuchedzhy 2009 """ Compile fluid files (fltk graphic library). Use the 'fluid' feature in conjunction with the 'cxx' feature. 
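A minimal wscript sketch (the file names and the 'app' target are
illustrative; the FLTK use variable is populated by this tool's configure
step)::

	def options(opt):
		opt.load('compiler_cxx fluid')

	def configure(conf):
		conf.load('compiler_cxx fluid')

	def build(bld):
		bld.program(features='fluid', source='ui.fl main.cpp', target='app', use='FLTK')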
""" from waflib import Task from waflib.TaskGen import extension class fluid(Task.Task): color = 'BLUE' ext_out = ['.h'] run_str = '${FLUID} -c -o ${TGT[0].abspath()} -h ${TGT[1].abspath()} ${SRC}' @extension('.fl') def process_fluid(self, node): """add the .fl to the source list; the cxx file generated will be compiled when possible""" cpp = node.change_ext('.cpp') hpp = node.change_ext('.hpp') self.create_task('fluid', node, [cpp, hpp]) if 'cxx' in self.features: self.source.append(cpp) def configure(conf): conf.find_program('fluid', var='FLUID') conf.check_cfg(path='fltk-config', package='', args='--cxxflags --ldflags', uselib_store='FLTK', mandatory=True) tdb-1.4.2/third_party/waf/waflib/extras/freeimage.py0000660000000000000000000000410513444661622022411 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # # written by Sylvain Rouquette, 2011 ''' To add the freeimage tool to the waf file: $ ./waf-light --tools=compat15,freeimage or, if you have waf >= 1.6.2 $ ./waf update --files=freeimage The wscript will look like: def options(opt): opt.load('compiler_cxx freeimage') def configure(conf): conf.load('compiler_cxx freeimage') # you can call check_freeimage with some parameters. # It's optional on Linux, it's 'mandatory' on Windows if # you didn't use --fi-path on the command-line # conf.check_freeimage(path='FreeImage/Dist', fip=True) def build(bld): bld(source='main.cpp', target='app', use='FREEIMAGE') ''' from waflib import Utils from waflib.Configure import conf def options(opt): opt.add_option('--fi-path', type='string', default='', dest='fi_path', help='''path to the FreeImage directory \ where the files are e.g. /FreeImage/Dist''') opt.add_option('--fip', action='store_true', default=False, dest='fip', help='link with FreeImagePlus') opt.add_option('--fi-static', action='store_true', default=False, dest='fi_static', help="link as shared libraries") @conf def check_freeimage(self, path=None, fip=False): self.start_msg('Checking FreeImage') if not self.env['CXX']: self.fatal('you must load compiler_cxx before loading freeimage') prefix = self.options.fi_static and 'ST' or '' platform = Utils.unversioned_sys_platform() if platform == 'win32': if not path: self.fatal('you must specify the path to FreeImage. \ use --fi-path=/FreeImage/Dist') else: self.env['INCLUDES_FREEIMAGE'] = path self.env['%sLIBPATH_FREEIMAGE' % prefix] = path libs = ['FreeImage'] if self.options.fip: libs.append('FreeImagePlus') if platform == 'win32': self.env['%sLIB_FREEIMAGE' % prefix] = libs else: self.env['%sLIB_FREEIMAGE' % prefix] = [i.lower() for i in libs] self.end_msg('ok') def configure(conf): platform = Utils.unversioned_sys_platform() if platform == 'win32' and not conf.options.fi_path: return conf.check_freeimage(conf.options.fi_path, conf.options.fip) tdb-1.4.2/third_party/waf/waflib/extras/fsb.py0000660000000000000000000000107413444661622021241 0ustar rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2011 (ita) """ Fully sequential builds The previous tasks from task generators are re-processed, and this may lead to speed issues Yet, if you are using this, speed is probably a minor concern """ from waflib import Build def options(opt): pass def configure(conf): pass class FSBContext(Build.BuildContext): def __call__(self, *k, **kw): ret = Build.BuildContext.__call__(self, *k, **kw) # evaluate the results immediately Build.BuildContext.compile(self) return ret def compile(self): pass tdb-1.4.2/third_party/waf/waflib/extras/fsc.py0000660000000000000000000000356513444661622021251 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2011 (ita) """ Experimental F# stuff FSC="mono /path/to/fsc.exe" waf configure build """ from waflib import Utils, Task from waflib.TaskGen import before_method, after_method, feature from waflib.Tools import ccroot, cs ccroot.USELIB_VARS['fsc'] = set(['CSFLAGS', 'ASSEMBLIES', 'RESOURCES']) @feature('fs') @before_method('process_source') def apply_fsc(self): cs_nodes = [] no_nodes = [] for x in self.to_nodes(self.source): if x.name.endswith('.fs'): cs_nodes.append(x) else: no_nodes.append(x) self.source = no_nodes bintype = getattr(self, 'type', self.gen.endswith('.dll') and 'library' or 'exe') self.cs_task = tsk = self.create_task('fsc', cs_nodes, self.path.find_or_declare(self.gen)) tsk.env.CSTYPE = '/target:%s' % bintype tsk.env.OUT = '/out:%s' % tsk.outputs[0].abspath() inst_to = getattr(self, 'install_path', bintype=='exe' and '${BINDIR}' or '${LIBDIR}') if inst_to: # note: we are making a copy, so the files added to cs_task.outputs won't be installed automatically mod = getattr(self, 'chmod', bintype=='exe' and Utils.O755 or Utils.O644) self.install_task = self.add_install_files(install_to=inst_to, install_from=self.cs_task.outputs[:], chmod=mod) feature('fs')(cs.use_cs) after_method('apply_fsc')(cs.use_cs) feature('fs')(cs.debug_cs) after_method('apply_fsc', 'use_cs')(cs.debug_cs) class fsc(Task.Task): """ Compile F# files """ color = 'YELLOW' run_str = '${FSC} ${CSTYPE} ${CSFLAGS} ${ASS_ST:ASSEMBLIES} ${RES_ST:RESOURCES} ${OUT} ${SRC}' def configure(conf): """ Find a F# compiler, set the variable FSC for the compiler and FS_NAME (mono or fsc) """ conf.find_program(['fsc.exe', 'fsharpc'], var='FSC') conf.env.ASS_ST = '/r:%s' conf.env.RES_ST = '/resource:%s' conf.env.FS_NAME = 'fsc' if str(conf.env.FSC).lower().find('fsharpc') > -1: conf.env.FS_NAME = 'mono' tdb-1.4.2/third_party/waf/waflib/extras/gccdeps.py0000660000000000000000000001410013527011455022064 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2008-2010 (ita) """ Execute the tasks with gcc -MD, read the dependencies from the .d file and prepare the dependency calculation for the next run. This affects the cxx class, so make sure to load Qt5 after this tool. 
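With these flags the compiler writes, next to each object file, a small
Makefile fragment naming every file that was read; a main.d emitted for
main.o typically looks like this (paths illustrative)::

	main.o: main.c \
	 include/app.h \
	 include/util.h

The post_run() override below strips the 'main.o:' left-hand side, joins the
backslash-continued lines and records the remaining paths in bld.node_deps,
so the next run recompiles only when one of those files changes.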
Usage:: def options(opt): opt.load('compiler_cxx') def configure(conf): conf.load('compiler_cxx gccdeps') """ import os, re, threading from waflib import Task, Logs, Utils, Errors from waflib.Tools import c_preproc from waflib.TaskGen import before_method, feature lock = threading.Lock() gccdeps_flags = ['-MD'] if not c_preproc.go_absolute: gccdeps_flags = ['-MMD'] # Third-party tools are allowed to add extra names in here with append() supported_compilers = ['gcc', 'icc', 'clang'] def scan(self): if not self.__class__.__name__ in self.env.ENABLE_GCCDEPS: return super(self.derived_gccdeps, self).scan() nodes = self.generator.bld.node_deps.get(self.uid(), []) names = [] return (nodes, names) re_o = re.compile(r"\.o$") re_splitter = re.compile(r'(?= 0: return line[sep_idx + 2:] else: return line def path_to_node(base_node, path, cached_nodes): # Take the base node and the path and return a node # Results are cached because searching the node tree is expensive # The following code is executed by threads, it is not safe, so a lock is needed... if getattr(path, '__hash__'): node_lookup_key = (base_node, path) else: # Not hashable, assume it is a list and join into a string node_lookup_key = (base_node, os.path.sep.join(path)) try: lock.acquire() node = cached_nodes[node_lookup_key] except KeyError: node = base_node.find_resource(path) cached_nodes[node_lookup_key] = node finally: lock.release() return node def post_run(self): if not self.__class__.__name__ in self.env.ENABLE_GCCDEPS: return super(self.derived_gccdeps, self).post_run() name = self.outputs[0].abspath() name = re_o.sub('.d', name) try: txt = Utils.readf(name) except EnvironmentError: Logs.error('Could not find a .d dependency file, are cflags/cxxflags overwritten?') raise #os.remove(name) # Compilers have the choice to either output the file's dependencies # as one large Makefile rule: # # /path/to/file.o: /path/to/dep1.h \ # /path/to/dep2.h \ # /path/to/dep3.h \ # ... # # or as many individual rules: # # /path/to/file.o: /path/to/dep1.h # /path/to/file.o: /path/to/dep2.h # /path/to/file.o: /path/to/dep3.h # ... # # So the first step is to sanitize the input by stripping out the left- # hand side of all these lines. After that, whatever remains are the # implicit dependencies of task.outputs[0] txt = '\n'.join([remove_makefile_rule_lhs(line) for line in txt.splitlines()]) # Now join all the lines together txt = txt.replace('\\\n', '') val = txt.strip() val = [x.replace('\\ ', ' ') for x in re_splitter.split(val) if x] nodes = [] bld = self.generator.bld # Dynamically bind to the cache try: cached_nodes = bld.cached_nodes except AttributeError: cached_nodes = bld.cached_nodes = {} for x in val: node = None if os.path.isabs(x): node = path_to_node(bld.root, x, cached_nodes) else: # TODO waf 1.9 - single cwd value path = getattr(bld, 'cwdx', bld.bldnode) # when calling find_resource, make sure the path does not contain '..' x = [k for k in Utils.split_path(x) if k and k != '.'] while '..' 
in x: idx = x.index('..') if idx == 0: x = x[1:] path = path.parent else: del x[idx] del x[idx-1] node = path_to_node(path, x, cached_nodes) if not node: raise ValueError('could not find %r for %r' % (x, self)) if id(node) == id(self.inputs[0]): # ignore the source file, it is already in the dependencies # this way, successful config tests may be retrieved from the cache continue nodes.append(node) Logs.debug('deps: gccdeps for %s returned %s', self, nodes) bld.node_deps[self.uid()] = nodes bld.raw_deps[self.uid()] = [] try: del self.cache_sig except AttributeError: pass Task.Task.post_run(self) def sig_implicit_deps(self): if not self.__class__.__name__ in self.env.ENABLE_GCCDEPS: return super(self.derived_gccdeps, self).sig_implicit_deps() try: return Task.Task.sig_implicit_deps(self) except Errors.WafError: return Utils.SIG_NIL def wrap_compiled_task(classname): derived_class = type(classname, (Task.classes[classname],), {}) derived_class.derived_gccdeps = derived_class derived_class.post_run = post_run derived_class.scan = scan derived_class.sig_implicit_deps = sig_implicit_deps for k in ('c', 'cxx'): if k in Task.classes: wrap_compiled_task(k) @before_method('process_source') @feature('force_gccdeps') def force_gccdeps(self): self.env.ENABLE_GCCDEPS = ['c', 'cxx'] def configure(conf): # in case someone provides a --enable-gccdeps command-line option if not getattr(conf.options, 'enable_gccdeps', True): return global gccdeps_flags flags = conf.env.GCCDEPS_FLAGS or gccdeps_flags if conf.env.CC_NAME in supported_compilers: try: conf.check(fragment='int main() { return 0; }', features='c force_gccdeps', cflags=flags, msg='Checking for c flags %r' % ''.join(flags)) except Errors.ConfigurationError: pass else: conf.env.append_value('CFLAGS', flags) conf.env.append_unique('ENABLE_GCCDEPS', 'c') if conf.env.CXX_NAME in supported_compilers: try: conf.check(fragment='int main() { return 0; }', features='cxx force_gccdeps', cxxflags=flags, msg='Checking for cxx flags %r' % ''.join(flags)) except Errors.ConfigurationError: pass else: conf.env.append_value('CXXFLAGS', flags) conf.env.append_unique('ENABLE_GCCDEPS', 'cxx') def options(opt): raise ValueError('Do not load gccdeps options') tdb-1.4.2/third_party/waf/waflib/extras/gdbus.py0000660000000000000000000000553213444661622021576 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Copyright Garmin International or its subsidiaries, 2018 # # Heavily based on dbus.py """ Compiles dbus files with **gdbus-codegen** Typical usage:: def options(opt): opt.load('compiler_c gdbus') def configure(conf): conf.load('compiler_c gdbus') def build(bld): tg = bld.program( includes = '.', source = bld.path.ant_glob('*.c'), target = 'gnome-hello') tg.add_gdbus_file('test.xml', 'com.example.example.', 'Example') """ from waflib import Task, Errors, Utils from waflib.TaskGen import taskgen_method, before_method @taskgen_method def add_gdbus_file(self, filename, prefix, namespace, export=False): """ Adds a dbus file to the list of dbus files to process. Store them in the attribute *dbus_lst*. :param filename: xml file to compile :type filename: string :param prefix: interface prefix (--interface-prefix=prefix) :type prefix: string :param mode: C namespace (--c-namespace=namespace) :type mode: string :param export: Export Headers? 
:type export: boolean """ if not hasattr(self, 'gdbus_lst'): self.gdbus_lst = [] if not 'process_gdbus' in self.meths: self.meths.append('process_gdbus') self.gdbus_lst.append([filename, prefix, namespace, export]) @before_method('process_source') def process_gdbus(self): """ Processes the dbus files stored in the attribute *gdbus_lst* to create :py:class:`gdbus_binding_tool` instances. """ output_node = self.path.get_bld().make_node(['gdbus', self.get_name()]) sources = [] for filename, prefix, namespace, export in getattr(self, 'gdbus_lst', []): node = self.path.find_resource(filename) if not node: raise Errors.WafError('file not found ' + filename) c_file = output_node.find_or_declare(node.change_ext('.c').name) h_file = output_node.find_or_declare(node.change_ext('.h').name) tsk = self.create_task('gdbus_binding_tool', node, [c_file, h_file]) tsk.cwd = output_node.abspath() tsk.env.GDBUS_CODEGEN_INTERFACE_PREFIX = prefix tsk.env.GDBUS_CODEGEN_NAMESPACE = namespace tsk.env.GDBUS_CODEGEN_OUTPUT = node.change_ext('').name sources.append(c_file) if sources: output_node.mkdir() self.source = Utils.to_list(self.source) + sources self.includes = [output_node] + self.to_incnodes(getattr(self, 'includes', [])) if export: self.export_includes = [output_node] + self.to_incnodes(getattr(self, 'export_includes', [])) class gdbus_binding_tool(Task.Task): """ Compiles a dbus file """ color = 'BLUE' ext_out = ['.h', '.c'] run_str = '${GDBUS_CODEGEN} --interface-prefix ${GDBUS_CODEGEN_INTERFACE_PREFIX} --generate-c-code ${GDBUS_CODEGEN_OUTPUT} --c-namespace ${GDBUS_CODEGEN_NAMESPACE} --c-generate-object-manager ${SRC[0].abspath()}' shell = True def configure(conf): """ Detects the program gdbus-codegen and sets ``conf.env.GDBUS_CODEGEN`` """ conf.find_program('gdbus-codegen', var='GDBUS_CODEGEN') tdb-1.4.2/third_party/waf/waflib/extras/gob2.py0000660000000000000000000000047213444661622021321 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Ali Sabil, 2007 from waflib import TaskGen TaskGen.declare_chain( name = 'gob2', rule = '${GOB2} -o ${TGT[0].bld_dir()} ${GOB2FLAGS} ${SRC}', ext_in = '.gob', ext_out = '.c' ) def configure(conf): conf.find_program('gob2', var='GOB2') conf.env['GOB2FLAGS'] = '' tdb-1.4.2/third_party/waf/waflib/extras/halide.py0000660000000000000000000000762313444661622021723 0ustar rootroot00000000000000#!/usr/bin/python # -*- coding: utf-8 -*- # Halide code generation tool __author__ = __maintainer__ = "Jérôme Carretero " __copyright__ = "Jérôme Carretero, 2014" """ Tool to run `Halide `_ code generators. Usage:: bld( name='pipeline', # ^ Reference this in use="..." for things using the generated code #target=['pipeline.o', 'pipeline.h'] # ^ by default, name.{o,h} is added, but you can set the outputs here features='halide', halide_env="HL_TRACE=1 HL_TARGET=host-opencl-gpu_debug", # ^ Environment passed to the generator, # can be a dict, k/v list, or string. args=[], # ^ Command-line arguments to the generator (optional), # eg. to give parameters to the scheduling source='pipeline_gen', # ^ Name of the source executable ) Known issues: - Currently only supports Linux (no ".exe") - Doesn't rerun on input modification when input is part of a build chain, and has been modified externally. 
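# The halide_env forms accepted above are interchangeable; the feature below
# normalizes each of them to a dict before handing it to the generator
# process, roughly like this sketch:
env = 'HL_TRACE=1 HL_TARGET=host'  # may equally be a list or a dict
if isinstance(env, str):
	env = dict(x.split('=') for x in env.split())
elif isinstance(env, list):
	env = dict(x.split('=') for x in env)
assert isinstance(env, dict)  # {'HL_TRACE': '1', 'HL_TARGET': 'host'}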
""" import os from waflib import Task, Utils, Options, TaskGen, Errors class run_halide_gen(Task.Task): color = 'CYAN' vars = ['HALIDE_ENV', 'HALIDE_ARGS'] run_str = "${SRC[0].abspath()} ${HALIDE_ARGS}" def __str__(self): stuff = "halide" stuff += ("[%s]" % (",".join( ('%s=%s' % (k,v)) for k, v in sorted(self.env.env.items())))) return Task.Task.__str__(self).replace(self.__class__.__name__, stuff) @TaskGen.feature('halide') @TaskGen.before_method('process_source') def halide(self): Utils.def_attrs(self, args=[], halide_env={}, ) bld = self.bld env = self.halide_env try: if isinstance(env, str): env = dict(x.split('=') for x in env.split()) elif isinstance(env, list): env = dict(x.split('=') for x in env) assert isinstance(env, dict) except Exception as e: if not isinstance(e, ValueError) \ and not isinstance(e, AssertionError): raise raise Errors.WafError( "halide_env must be under the form" \ " {'HL_x':'a', 'HL_y':'b'}" \ " or ['HL_x=y', 'HL_y=b']" \ " or 'HL_x=y HL_y=b'") src = self.to_nodes(self.source) assert len(src) == 1, "Only one source expected" src = src[0] args = Utils.to_list(self.args) def change_ext(src, ext): # Return a node with a new extension, in an appropriate folder name = src.name xpos = src.name.rfind('.') if xpos == -1: xpos = len(src.name) newname = name[:xpos] + ext if src.is_child_of(bld.bldnode): node = src.get_src().parent.find_or_declare(newname) else: node = bld.bldnode.find_or_declare(newname) return node def to_nodes(self, lst, path=None): tmp = [] path = path or self.path find = path.find_or_declare if isinstance(lst, self.path.__class__): lst = [lst] for x in Utils.to_list(lst): if isinstance(x, str): node = find(x) else: node = x tmp.append(node) return tmp tgt = to_nodes(self, self.target) if not tgt: tgt = [change_ext(src, '.o'), change_ext(src, '.h')] cwd = tgt[0].parent.abspath() task = self.create_task('run_halide_gen', src, tgt, cwd=cwd) task.env.append_unique('HALIDE_ARGS', args) if task.env.env == []: task.env.env = {} task.env.env.update(env) task.env.HALIDE_ENV = " ".join(("%s=%s" % (k,v)) for (k,v) in sorted(env.items())) task.env.HALIDE_ARGS = args try: self.compiled_tasks.append(task) except AttributeError: self.compiled_tasks = [task] self.source = [] def configure(conf): if Options.options.halide_root is None: conf.check_cfg(package='Halide', args='--cflags --libs') else: halide_root = Options.options.halide_root conf.env.INCLUDES_HALIDE = [ os.path.join(halide_root, "include") ] conf.env.LIBPATH_HALIDE = [ os.path.join(halide_root, "lib") ] conf.env.LIB_HALIDE = ["Halide"] # You might want to add this, while upstream doesn't fix it #conf.env.LIB_HALIDE += ['ncurses', 'dl', 'pthread'] def options(opt): opt.add_option('--halide-root', help="path to Halide include and lib files", ) tdb-1.4.2/third_party/waf/waflib/extras/javatest.py0000770000000000000000000001004513444661622022310 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Federico Pellegrin, 2017 (fedepell) """ Provides Java Unit test support using :py:class:`waflib.Tools.waf_unit_test.utest` task via the **javatest** feature. This gives the possibility to run unit test and have them integrated into the standard waf unit test environment. It has been tested with TestNG and JUnit but should be easily expandable to other frameworks given the flexibility of ut_str provided by the standard waf unit test environment. Example usage: def options(opt): opt.load('java waf_unit_test javatest') def configure(conf): conf.load('java javatest') def build(bld): [ ... 
mainprog is built here ... ] bld(features = 'javac javatest', srcdir = 'test/', outdir = 'test', sourcepath = ['test'], classpath = [ 'src' ], basedir = 'test', use = ['JAVATEST', 'mainprog'], # mainprog is the program being tested in src/ ut_str = 'java -cp ${CLASSPATH} ${JTRUNNER} ${SRC}', jtest_source = bld.path.ant_glob('test/*.xml'), ) At command line the CLASSPATH where to find the testing environment and the test runner (default TestNG) that will then be seen in the environment as CLASSPATH_JAVATEST (then used for use) and JTRUNNER and can be used for dependencies and ut_str generation. Example configure for TestNG: waf configure --jtpath=/tmp/testng-6.12.jar:/tmp/jcommander-1.71.jar --jtrunner=org.testng.TestNG or as default runner is TestNG: waf configure --jtpath=/tmp/testng-6.12.jar:/tmp/jcommander-1.71.jar Example configure for JUnit: waf configure --jtpath=/tmp/junit.jar --jtrunner=org.junit.runner.JUnitCore The runner class presence on the system is checked for at configuration stage. """ import os from waflib import Task, TaskGen, Options @TaskGen.feature('javatest') @TaskGen.after_method('apply_java', 'use_javac_files', 'set_classpath') def make_javatest(self): """ Creates a ``utest`` task with a populated environment for Java Unit test execution """ tsk = self.create_task('utest') tsk.set_run_after(self.javac_task) # Put test input files as waf_unit_test relies on that for some prints and log generation # If jtest_source is there, this is specially useful for passing XML for TestNG # that contain test specification, use that as inputs, otherwise test sources if getattr(self, 'jtest_source', None): tsk.inputs = self.to_nodes(self.jtest_source) else: if self.javac_task.srcdir[0].exists(): tsk.inputs = self.javac_task.srcdir[0].ant_glob('**/*.java', remove=False) if getattr(self, 'ut_str', None): self.ut_run, lst = Task.compile_fun(self.ut_str, shell=getattr(self, 'ut_shell', False)) tsk.vars = lst + tsk.vars if getattr(self, 'ut_cwd', None): if isinstance(self.ut_cwd, str): # we want a Node instance if os.path.isabs(self.ut_cwd): self.ut_cwd = self.bld.root.make_node(self.ut_cwd) else: self.ut_cwd = self.path.make_node(self.ut_cwd) else: self.ut_cwd = self.bld.bldnode # Get parent CLASSPATH and add output dir of test, we run from wscript dir # We have to change it from list to the standard java -cp format (: separated) tsk.env.CLASSPATH = ':'.join(self.env.CLASSPATH) + ':' + self.outdir.abspath() if not self.ut_cwd.exists(): self.ut_cwd.mkdir() if not hasattr(self, 'ut_env'): self.ut_env = dict(os.environ) def configure(ctx): cp = ctx.env.CLASSPATH or '.' 
if getattr(Options.options, 'jtpath', None): ctx.env.CLASSPATH_JAVATEST = getattr(Options.options, 'jtpath').split(':') cp += ':' + getattr(Options.options, 'jtpath') if getattr(Options.options, 'jtrunner', None): ctx.env.JTRUNNER = getattr(Options.options, 'jtrunner') if ctx.check_java_class(ctx.env.JTRUNNER, with_classpath=cp): ctx.fatal('Could not run test class %r' % ctx.env.JTRUNNER) def options(opt): opt.add_option('--jtpath', action='store', default='', dest='jtpath', help='Path to jar(s) needed for javatest execution, colon separated, if not in the system CLASSPATH') opt.add_option('--jtrunner', action='store', default='org.testng.TestNG', dest='jtrunner', help='Class to run javatest test [default: org.testng.TestNG]') tdb-1.4.2/third_party/waf/waflib/extras/kde4.py0000660000000000000000000000524313527011455021313 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2010 (ita) """ Support for the KDE4 libraries and msgfmt """ import os, re from waflib import Task, Utils from waflib.TaskGen import feature @feature('msgfmt') def apply_msgfmt(self): """ Process all languages to create .mo files and to install them:: def build(bld): bld(features='msgfmt', langs='es de fr', appname='myapp', install_path='${KDE4_LOCALE_INSTALL_DIR}') """ for lang in self.to_list(self.langs): node = self.path.find_resource(lang+'.po') task = self.create_task('msgfmt', node, node.change_ext('.mo')) langname = lang.split('/') langname = langname[-1] inst = getattr(self, 'install_path', '${KDE4_LOCALE_INSTALL_DIR}') self.add_install_as( inst_to = inst + os.sep + langname + os.sep + 'LC_MESSAGES' + os.sep + getattr(self, 'appname', 'set_your_appname') + '.mo', inst_from = task.outputs[0], chmod = getattr(self, 'chmod', Utils.O644)) class msgfmt(Task.Task): """ Transform .po files into .mo files """ color = 'BLUE' run_str = '${MSGFMT} ${SRC} -o ${TGT}' def configure(self): """ Detect kde4-config and set various variables for the *use* system:: def options(opt): opt.load('compiler_cxx kde4') def configure(conf): conf.load('compiler_cxx kde4') def build(bld): bld.program(source='main.c', target='app', use='KDECORE KIO KHTML') """ kdeconfig = self.find_program('kde4-config') prefix = self.cmd_and_log(kdeconfig + ['--prefix']).strip() fname = '%s/share/apps/cmake/modules/KDELibsDependencies.cmake' % prefix try: os.stat(fname) except OSError: fname = '%s/share/kde4/apps/cmake/modules/KDELibsDependencies.cmake' % prefix try: os.stat(fname) except OSError: self.fatal('could not open %s' % fname) try: txt = Utils.readf(fname) except EnvironmentError: self.fatal('could not read %s' % fname) txt = txt.replace('\\\n', '\n') fu = re.compile('#(.*)\n') txt = fu.sub('', txt) setregexp = re.compile(r'([sS][eE][tT]\s*\()\s*([^\s]+)\s+\"([^"]+)\"\)') found = setregexp.findall(txt) for (_, key, val) in found: #print key, val self.env[key] = val # well well, i could just write an interpreter for cmake files self.env['LIB_KDECORE']= ['kdecore'] self.env['LIB_KDEUI'] = ['kdeui'] self.env['LIB_KIO'] = ['kio'] self.env['LIB_KHTML'] = ['khtml'] self.env['LIB_KPARTS'] = ['kparts'] self.env['LIBPATH_KDECORE'] = [os.path.join(self.env.KDE4_LIB_INSTALL_DIR, 'kde4', 'devel'), self.env.KDE4_LIB_INSTALL_DIR] self.env['INCLUDES_KDECORE'] = [self.env['KDE4_INCLUDE_INSTALL_DIR']] self.env.append_value('INCLUDES_KDECORE', [self.env['KDE4_INCLUDE_INSTALL_DIR']+ os.sep + 'KDE']) self.find_program('msgfmt', var='MSGFMT') 
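# configure() above recovers the KDE install layout by scraping set(KEY "value")
# pairs out of KDELibsDependencies.cmake. The scrape itself is a one-regex
# affair; a self-contained sketch with an invented sample:
import re

cmake_text = 'set(KDE4_LIB_INSTALL_DIR "/usr/lib/kde4")\nset(KDE4_INCLUDE_INSTALL_DIR "/usr/include/kde4")\n'
setregexp = re.compile(r'([sS][eE][tT]\s*\()\s*([^\s]+)\s+\"([^"]+)\"\)')
for (_, key, val) in setregexp.findall(cmake_text):
	print(key, '->', val)  # each pair is copied into conf.env by configure()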
tdb-1.4.2/third_party/waf/waflib/extras/local_rpath.py0000660000000000000000000000103213444661622022751 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2011 (ita) from waflib.TaskGen import after_method, feature @after_method('propagate_uselib_vars') @feature('cprogram', 'cshlib', 'cxxprogram', 'cxxshlib', 'fcprogram', 'fcshlib') def add_rpath_stuff(self): all = self.to_list(getattr(self, 'use', [])) while all: name = all.pop() try: tg = self.bld.get_tgen_by_name(name) except: continue self.env.append_value('RPATH', tg.link_task.outputs[0].parent.abspath()) all.extend(self.to_list(getattr(tg, 'use', []))) tdb-1.4.2/third_party/waf/waflib/extras/make.py0000660000000000000000000000620213444661622021402 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2011 (ita) """ A make-like way of executing the build, following the relationships between inputs/outputs This algorithm will lead to slower builds, will not be as flexible as "waf build", but it might be useful for building data files (?) It is likely to break in the following cases: - files are created dynamically (no inputs or outputs) - headers - building two files from different groups """ import re from waflib import Options, Task from waflib.Build import BuildContext class MakeContext(BuildContext): '''executes tasks in a step-by-step manner, following dependencies between inputs/outputs''' cmd = 'make' fun = 'build' def __init__(self, **kw): super(MakeContext, self).__init__(**kw) self.files = Options.options.files def get_build_iterator(self): if not self.files: while 1: yield super(MakeContext, self).get_build_iterator() for g in self.groups: for tg in g: try: f = tg.post except AttributeError: pass else: f() provides = {} uses = {} all_tasks = [] tasks = [] for pat in self.files.split(','): matcher = self.get_matcher(pat) for tg in g: if isinstance(tg, Task.Task): lst = [tg] else: lst = tg.tasks for tsk in lst: all_tasks.append(tsk) do_exec = False for node in tsk.inputs: try: uses[node].append(tsk) except: uses[node] = [tsk] if matcher(node, output=False): do_exec = True break for node in tsk.outputs: try: provides[node].append(tsk) except: provides[node] = [tsk] if matcher(node, output=True): do_exec = True break if do_exec: tasks.append(tsk) # so we have the tasks that we need to process, the list of all tasks, # the map of the tasks providing nodes, and the map of tasks using nodes if not tasks: # if there are no tasks matching, return everything in the current group result = all_tasks else: # this is like a big filter... 
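				# Reverse closure (descriptive comment): starting from the matched
				# tasks, keep adding the tasks that provide their input nodes, so
				# every (transitive) producer of the requested files is scheduled.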
result = set() seen = set() cur = set(tasks) while cur: result |= cur tosee = set() for tsk in cur: for node in tsk.inputs: if node in seen: continue seen.add(node) tosee |= set(provides.get(node, [])) cur = tosee result = list(result) Task.set_file_constraints(result) Task.set_precedence_constraints(result) yield result while 1: yield [] def get_matcher(self, pat): # this returns a function inn = True out = True if pat.startswith('in:'): out = False pat = pat.replace('in:', '') elif pat.startswith('out:'): inn = False pat = pat.replace('out:', '') anode = self.root.find_node(pat) pattern = None if not anode: if not pat.startswith('^'): pat = '^.+?%s' % pat if not pat.endswith('$'): pat = '%s$' % pat pattern = re.compile(pat) def match(node, output): if output and not out: return False if not output and not inn: return False if anode: return anode == node else: return pattern.match(node.abspath()) return match tdb-1.4.2/third_party/waf/waflib/extras/midl.py0000660000000000000000000000324313444661622021414 0ustar rootroot00000000000000#!/usr/bin/env python # Issue 1185 ultrix gmail com """ Microsoft Interface Definition Language support. Given ComObject.idl, this tool will generate ComObject.tlb ComObject_i.h ComObject_i.c ComObject_p.c and dlldata.c To declare targets using midl:: def configure(conf): conf.load('msvc') conf.load('midl') def build(bld): bld( features='c cshlib', # Note: ComObject_i.c is generated from ComObject.idl source = 'main.c ComObject.idl ComObject_i.c', target = 'ComObject.dll') """ from waflib import Task, Utils from waflib.TaskGen import feature, before_method import os def configure(conf): conf.find_program(['midl'], var='MIDL') conf.env.MIDLFLAGS = [ '/nologo', '/D', '_DEBUG', '/W1', '/char', 'signed', '/Oicf', ] @feature('c', 'cxx') @before_method('process_source') def idl_file(self): # Do this before process_source so that the generated header can be resolved # when scanning source dependencies. idl_nodes = [] src_nodes = [] for node in Utils.to_list(self.source): if str(node).endswith('.idl'): idl_nodes.append(node) else: src_nodes.append(node) for node in self.to_nodes(idl_nodes): t = node.change_ext('.tlb') h = node.change_ext('_i.h') c = node.change_ext('_i.c') p = node.change_ext('_p.c') d = node.parent.find_or_declare('dlldata.c') self.create_task('midl', node, [t, h, c, p, d]) self.source = src_nodes class midl(Task.Task): """ Compile idl files """ color = 'YELLOW' run_str = '${MIDL} ${MIDLFLAGS} ${CPPPATH_ST:INCLUDES} /tlb ${TGT[0].bldpath()} /header ${TGT[1].bldpath()} /iid ${TGT[2].bldpath()} /proxy ${TGT[3].bldpath()} /dlldata ${TGT[4].bldpath()} ${SRC}' before = ['winrc'] tdb-1.4.2/third_party/waf/waflib/extras/msvcdeps.py0000660000000000000000000001652313527011455022313 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Copyright Garmin International or its subsidiaries, 2012-2013 ''' Off-load dependency scanning from Python code to MSVC compiler This tool is safe to load in any environment; it will only activate the MSVC exploits when it finds that a particular taskgen uses MSVC to compile. Empirical testing shows about a 10% execution time savings from using this tool as compared to c_preproc. The technique of gutting scan() and pushing the dependency calculation down to post_run() is cribbed from gccdeps.py. This affects the cxx class, so make sure to load Qt5 after this tool. 
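The mechanism: the compiler is invoked with /showIncludes, and every
'Note: including file:' line in its output is turned into a dependency node,
replacing the c_preproc scan entirely.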
Usage:: def options(opt): opt.load('compiler_cxx') def configure(conf): conf.load('compiler_cxx msvcdeps') ''' import os, sys, tempfile, threading from waflib import Context, Errors, Logs, Task, Utils from waflib.Tools import c_preproc, c, cxx, msvc from waflib.TaskGen import feature, before_method lock = threading.Lock() nodes = {} # Cache the path -> Node lookup PREPROCESSOR_FLAG = '/showIncludes' INCLUDE_PATTERN = 'Note: including file:' # Extensible by outside tools supported_compilers = ['msvc'] @feature('c', 'cxx') @before_method('process_source') def apply_msvcdeps_flags(taskgen): if taskgen.env.CC_NAME not in supported_compilers: return for flag in ('CFLAGS', 'CXXFLAGS'): if taskgen.env.get_flat(flag).find(PREPROCESSOR_FLAG) < 0: taskgen.env.append_value(flag, PREPROCESSOR_FLAG) def path_to_node(base_node, path, cached_nodes): ''' Take the base node and the path and return a node Results are cached because searching the node tree is expensive The following code is executed by threads, it is not safe, so a lock is needed... ''' # normalize the path because ant_glob() does not understand # parent path components (..) path = os.path.normpath(path) # normalize the path case to increase likelihood of a cache hit path = os.path.normcase(path) # ant_glob interprets [] and () characters, so those must be replaced path = path.replace('[', '?').replace(']', '?').replace('(', '[(]').replace(')', '[)]') node_lookup_key = (base_node, path) try: node = cached_nodes[node_lookup_key] except KeyError: # retry with lock on cache miss with lock: try: node = cached_nodes[node_lookup_key] except KeyError: node_list = base_node.ant_glob([path], ignorecase=True, remove=False, quiet=True, regex=False) node = cached_nodes[node_lookup_key] = node_list[0] if node_list else None return node def post_run(self): if self.env.CC_NAME not in supported_compilers: return super(self.derived_msvcdeps, self).post_run() # TODO this is unlikely to work with netcache if getattr(self, 'cached', None): return Task.Task.post_run(self) bld = self.generator.bld unresolved_names = [] resolved_nodes = [] # Dynamically bind to the cache try: cached_nodes = bld.cached_nodes except AttributeError: cached_nodes = bld.cached_nodes = {} for path in self.msvcdeps_paths: node = None if os.path.isabs(path): node = path_to_node(bld.root, path, cached_nodes) else: # when calling find_resource, make sure the path does not begin with '..' 
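				# e.g. a reported path like '..\\x\\y.h': climb one parent up per
				# leading '..' component, then resolve the remainder below that
				# base node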
base_node = bld.bldnode path = [k for k in Utils.split_path(path) if k and k != '.'] while path[0] == '..': path.pop(0) base_node = base_node.parent path = os.sep.join(path) node = path_to_node(base_node, path, cached_nodes) if not node: raise ValueError('could not find %r for %r' % (path, self)) else: if not c_preproc.go_absolute: if not (node.is_child_of(bld.srcnode) or node.is_child_of(bld.bldnode)): # System library Logs.debug('msvcdeps: Ignoring system include %r', node) continue if id(node) == id(self.inputs[0]): # Self-dependency continue resolved_nodes.append(node) bld.node_deps[self.uid()] = resolved_nodes bld.raw_deps[self.uid()] = unresolved_names try: del self.cache_sig except AttributeError: pass Task.Task.post_run(self) def scan(self): if self.env.CC_NAME not in supported_compilers: return super(self.derived_msvcdeps, self).scan() resolved_nodes = self.generator.bld.node_deps.get(self.uid(), []) unresolved_names = [] return (resolved_nodes, unresolved_names) def sig_implicit_deps(self): if self.env.CC_NAME not in supported_compilers: return super(self.derived_msvcdeps, self).sig_implicit_deps() try: return Task.Task.sig_implicit_deps(self) except Errors.WafError: return Utils.SIG_NIL def exec_command(self, cmd, **kw): if self.env.CC_NAME not in supported_compilers: return super(self.derived_msvcdeps, self).exec_command(cmd, **kw) if not 'cwd' in kw: kw['cwd'] = self.get_cwd() if self.env.PATH: env = kw['env'] = dict(kw.get('env') or self.env.env or os.environ) env['PATH'] = self.env.PATH if isinstance(self.env.PATH, str) else os.pathsep.join(self.env.PATH) # The Visual Studio IDE adds an environment variable that causes # the MS compiler to send its textual output directly to the # debugging window rather than normal stdout/stderr. # # This is unrecoverably bad for this tool because it will cause # all the dependency scanning to see an empty stdout stream and # assume that the file being compiled uses no headers. # # See http://blogs.msdn.com/b/freik/archive/2006/04/05/569025.aspx # # Attempting to repair the situation by deleting the offending # envvar at this point in tool execution will not be good enough-- # its presence poisons the 'waf configure' step earlier. We just # want to put a sanity check here in order to help developers # quickly diagnose the issue if an otherwise-good Waf tree # is then executed inside the MSVS IDE. 
assert 'VS_UNICODE_OUTPUT' not in kw['env'] cmd, args = self.split_argfile(cmd) try: (fd, tmp) = tempfile.mkstemp() os.write(fd, '\r\n'.join(args).encode()) os.close(fd) self.msvcdeps_paths = [] kw['env'] = kw.get('env', os.environ.copy()) kw['cwd'] = kw.get('cwd', os.getcwd()) kw['quiet'] = Context.STDOUT kw['output'] = Context.STDOUT out = [] if Logs.verbose: Logs.debug('argfile: @%r -> %r', tmp, args) try: raw_out = self.generator.bld.cmd_and_log(cmd + ['@' + tmp], **kw) ret = 0 except Errors.WafError as e: # Use e.msg if e.stdout is not set raw_out = getattr(e, 'stdout', e.msg) # Return non-zero error code even if we didn't # get one from the exception object ret = getattr(e, 'returncode', 1) for line in raw_out.splitlines(): if line.startswith(INCLUDE_PATTERN): inc_path = line[len(INCLUDE_PATTERN):].strip() Logs.debug('msvcdeps: Regex matched %s', inc_path) self.msvcdeps_paths.append(inc_path) else: out.append(line) # Pipe through the remaining stdout content (not related to /showIncludes) if self.generator.bld.logger: self.generator.bld.logger.debug('out: %s' % os.linesep.join(out)) else: sys.stdout.write(os.linesep.join(out) + os.linesep) return ret finally: try: os.remove(tmp) except OSError: # anti-virus and indexers can keep files open -_- pass def wrap_compiled_task(classname): derived_class = type(classname, (Task.classes[classname],), {}) derived_class.derived_msvcdeps = derived_class derived_class.post_run = post_run derived_class.scan = scan derived_class.sig_implicit_deps = sig_implicit_deps derived_class.exec_command = exec_command for k in ('c', 'cxx'): if k in Task.classes: wrap_compiled_task(k) def options(opt): raise ValueError('Do not load msvcdeps options') tdb-1.4.2/third_party/waf/waflib/extras/msvs.py0000660000000000000000000007452213444661622021467 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Avalanche Studios 2009-2011 # Thomas Nagy 2011 """ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ """ To add this tool to your project: def options(conf): opt.load('msvs') It can be a good idea to add the sync_exec tool too. 
To generate solution files: $ waf configure msvs To customize the outputs, provide subclasses in your wscript files:: from waflib.extras import msvs class vsnode_target(msvs.vsnode_target): def get_build_command(self, props): # likely to be required return "waf.bat build" def collect_source(self): # likely to be required ... class msvs_bar(msvs.msvs_generator): def init(self): msvs.msvs_generator.init(self) self.vsnode_target = vsnode_target The msvs class re-uses the same build() function for reading the targets (task generators), you may therefore specify msvs settings on the context object:: def build(bld): bld.solution_name = 'foo.sln' bld.waf_command = 'waf.bat' bld.projects_dir = bld.srcnode.make_node('.depproj') bld.projects_dir.mkdir() For visual studio 2008, the command is called 'msvs2008', and the classes such as vsnode_target are wrapped by a decorator class 'wrap_2008' to provide special functionality. To customize platform toolsets, pass additional parameters, for example:: class msvs_2013(msvs.msvs_generator): cmd = 'msvs2013' numver = '13.00' vsver = '2013' platform_toolset_ver = 'v120' ASSUMPTIONS: * a project can be either a directory or a target, vcxproj files are written only for targets that have source files * each project is a vcxproj file, therefore the project uuid needs only to be a hash of the absolute path """ import os, re, sys import uuid # requires python 2.5 from waflib.Build import BuildContext from waflib import Utils, TaskGen, Logs, Task, Context, Node, Options HEADERS_GLOB = '**/(*.h|*.hpp|*.H|*.inl)' PROJECT_TEMPLATE = r''' ${for b in project.build_properties} ${b.configuration} ${b.platform} ${endfor} {${project.uuid}} MakeFileProj ${project.name} ${for b in project.build_properties} Makefile ${b.outdir} ${project.platform_toolset_ver} ${endfor} ${for b in project.build_properties} ${endfor} ${for b in project.build_properties} ${xml:project.get_build_command(b)} ${xml:project.get_rebuild_command(b)} ${xml:project.get_clean_command(b)} ${xml:b.includes_search_path} ${xml:b.preprocessor_definitions};$(NMakePreprocessorDefinitions) ${xml:b.includes_search_path} $(ExecutablePath) ${if getattr(b, 'output_file', None)} ${xml:b.output_file} ${endif} ${if getattr(b, 'deploy_dir', None)} ${xml:b.deploy_dir} ${endif} ${endfor} ${for b in project.build_properties} ${if getattr(b, 'deploy_dir', None)} CopyToHardDrive ${endif} ${endfor} ${for x in project.source} <${project.get_key(x)} Include='${x.win32path()}' /> ${endfor} ''' FILTER_TEMPLATE = ''' ${for x in project.source} <${project.get_key(x)} Include="${x.win32path()}"> ${project.get_filter_name(x.parent)} ${endfor} ${for x in project.dirs()} {${project.make_uuid(x.win32path())}} ${endfor} ''' PROJECT_2008_TEMPLATE = r''' ${if project.build_properties} ${for b in project.build_properties} ${endfor} ${else} ${endif} ${if project.build_properties} ${for b in project.build_properties} ${endfor} ${else} ${endif} ${project.display_filter()} ''' SOLUTION_TEMPLATE = '''Microsoft Visual Studio Solution File, Format Version ${project.numver} # Visual Studio ${project.vsver} ${for p in project.all_projects} Project("{${p.ptype()}}") = "${p.name}", "${p.title}", "{${p.uuid}}" EndProject${endfor} Global GlobalSection(SolutionConfigurationPlatforms) = preSolution ${if project.all_projects} ${for (configuration, platform) in project.all_projects[0].ctx.project_configurations()} ${configuration}|${platform} = ${configuration}|${platform} ${endfor} ${endif} EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = 
postSolution ${for p in project.all_projects} ${if hasattr(p, 'source')} ${for b in p.build_properties} {${p.uuid}}.${b.configuration}|${b.platform}.ActiveCfg = ${b.configuration}|${b.platform} ${if getattr(p, 'is_active', None)} {${p.uuid}}.${b.configuration}|${b.platform}.Build.0 = ${b.configuration}|${b.platform} ${endif} ${if getattr(p, 'is_deploy', None)} {${p.uuid}}.${b.configuration}|${b.platform}.Deploy.0 = ${b.configuration}|${b.platform} ${endif} ${endfor} ${endif} ${endfor} EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection GlobalSection(NestedProjects) = preSolution ${for p in project.all_projects} ${if p.parent} {${p.uuid}} = {${p.parent.uuid}} ${endif} ${endfor} EndGlobalSection EndGlobal ''' COMPILE_TEMPLATE = '''def f(project): lst = [] def xml_escape(value): return value.replace("&", "&").replace('"', """).replace("'", "'").replace("<", "<").replace(">", ">") %s #f = open('cmd.txt', 'w') #f.write(str(lst)) #f.close() return ''.join(lst) ''' reg_act = re.compile(r"(?P\\)|(?P\$\$)|(?P\$\{(?P[^}]*?)\})", re.M) def compile_template(line): """ Compile a template expression into a python function (like jsps, but way shorter) """ extr = [] def repl(match): g = match.group if g('dollar'): return "$" elif g('backslash'): return "\\" elif g('subst'): extr.append(g('code')) return "<<|@|>>" return None line2 = reg_act.sub(repl, line) params = line2.split('<<|@|>>') assert(extr) indent = 0 buf = [] app = buf.append def app(txt): buf.append(indent * '\t' + txt) for x in range(len(extr)): if params[x]: app("lst.append(%r)" % params[x]) f = extr[x] if f.startswith(('if', 'for')): app(f + ':') indent += 1 elif f.startswith('py:'): app(f[3:]) elif f.startswith(('endif', 'endfor')): indent -= 1 elif f.startswith(('else', 'elif')): indent -= 1 app(f + ':') indent += 1 elif f.startswith('xml:'): app('lst.append(xml_escape(%s))' % f[4:]) else: #app('lst.append((%s) or "cannot find %s")' % (f, f)) app('lst.append(%s)' % f) if extr: if params[-1]: app("lst.append(%r)" % params[-1]) fun = COMPILE_TEMPLATE % "\n\t".join(buf) #print(fun) return Task.funex(fun) re_blank = re.compile('(\n|\r|\\s)*\n', re.M) def rm_blank_lines(txt): txt = re_blank.sub('\r\n', txt) return txt BOM = '\xef\xbb\xbf' try: BOM = bytes(BOM, 'latin-1') # python 3 except TypeError: pass def stealth_write(self, data, flags='wb'): try: unicode except NameError: data = data.encode('utf-8') # python 3 else: data = data.decode(sys.getfilesystemencoding(), 'replace') data = data.encode('utf-8') if self.name.endswith(('.vcproj', '.vcxproj')): data = BOM + data try: txt = self.read(flags='rb') if txt != data: raise ValueError('must write') except (IOError, ValueError): self.write(data, flags=flags) else: Logs.debug('msvs: skipping %s', self.win32path()) Node.Node.stealth_write = stealth_write re_win32 = re.compile(r'^([/\\]cygdrive)?[/\\]([a-z])([^a-z0-9_-].*)', re.I) def win32path(self): p = self.abspath() m = re_win32.match(p) if m: return "%s:%s" % (m.group(2).upper(), m.group(3)) return p Node.Node.win32path = win32path re_quote = re.compile("[^a-zA-Z0-9-]") def quote(s): return re_quote.sub("_", s) def xml_escape(value): return value.replace("&", "&").replace('"', """).replace("'", "'").replace("<", "<").replace(">", ">") def make_uuid(v, prefix = None): """ simple utility function """ if isinstance(v, dict): keys = list(v.keys()) keys.sort() tmp = str([(k, v[k]) for k in keys]) else: tmp = str(v) d = Utils.md5(tmp.encode()).hexdigest().upper() if prefix: d = '%s%s' % 
(prefix, d[8:]) gid = uuid.UUID(d, version = 4) return str(gid).upper() def diff(node, fromnode): # difference between two nodes, but with "(..)" instead of ".." c1 = node c2 = fromnode c1h = c1.height() c2h = c2.height() lst = [] up = 0 while c1h > c2h: lst.append(c1.name) c1 = c1.parent c1h -= 1 while c2h > c1h: up += 1 c2 = c2.parent c2h -= 1 while id(c1) != id(c2): lst.append(c1.name) up += 1 c1 = c1.parent c2 = c2.parent for i in range(up): lst.append('(..)') lst.reverse() return tuple(lst) class build_property(object): pass class vsnode(object): """ Abstract class representing visual studio elements We assume that all visual studio nodes have a uuid and a parent """ def __init__(self, ctx): self.ctx = ctx # msvs context self.name = '' # string, mandatory self.vspath = '' # path in visual studio (name for dirs, absolute path for projects) self.uuid = '' # string, mandatory self.parent = None # parent node for visual studio nesting def get_waf(self): """ Override in subclasses... """ return 'cd /d "%s" & %s' % (self.ctx.srcnode.win32path(), getattr(self.ctx, 'waf_command', 'waf.bat')) def ptype(self): """ Return a special uuid for projects written in the solution file """ pass def write(self): """ Write the project file, by default, do nothing """ pass def make_uuid(self, val): """ Alias for creating uuid values easily (the templates cannot access global variables) """ return make_uuid(val) class vsnode_vsdir(vsnode): """ Nodes representing visual studio folders (which do not match the filesystem tree!) """ VS_GUID_SOLUTIONFOLDER = "2150E333-8FDC-42A3-9474-1A3956D46DE8" def __init__(self, ctx, uuid, name, vspath=''): vsnode.__init__(self, ctx) self.title = self.name = name self.uuid = uuid self.vspath = vspath or name def ptype(self): return self.VS_GUID_SOLUTIONFOLDER class vsnode_project(vsnode): """ Abstract class representing visual studio project elements A project is assumed to be writable, and has a node representing the file to write to """ VS_GUID_VCPROJ = "8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942" def ptype(self): return self.VS_GUID_VCPROJ def __init__(self, ctx, node): vsnode.__init__(self, ctx) self.path = node self.uuid = make_uuid(node.win32path()) self.name = node.name self.platform_toolset_ver = getattr(ctx, 'platform_toolset_ver', None) self.title = self.path.win32path() self.source = [] # list of node objects self.build_properties = [] # list of properties (nmake commands, output dir, etc) def dirs(self): """ Get the list of parent folders of the source files (header files included) for writing the filters """ lst = [] def add(x): if x.height() > self.tg.path.height() and x not in lst: lst.append(x) add(x.parent) for x in self.source: add(x.parent) return lst def write(self): Logs.debug('msvs: creating %r', self.path) # first write the project file template1 = compile_template(PROJECT_TEMPLATE) proj_str = template1(self) proj_str = rm_blank_lines(proj_str) self.path.stealth_write(proj_str) # then write the filter template2 = compile_template(FILTER_TEMPLATE) filter_str = template2(self) filter_str = rm_blank_lines(filter_str) tmp = self.path.parent.make_node(self.path.name + '.filters') tmp.stealth_write(filter_str) def get_key(self, node): """ required for writing the source files """ name = node.name if name.endswith(('.cpp', '.c')): return 'ClCompile' return 'ClInclude' def collect_properties(self): """ Returns a list of triplet (configuration, platform, output_directory) """ ret = [] for c in self.ctx.configurations: for p in self.ctx.platforms: x = build_property() 
x.outdir = '' x.configuration = c x.platform = p x.preprocessor_definitions = '' x.includes_search_path = '' # can specify "deploy_dir" too ret.append(x) self.build_properties = ret def get_build_params(self, props): opt = '--execsolution=%s' % self.ctx.get_solution_node().win32path() return (self.get_waf(), opt) def get_build_command(self, props): return "%s build %s" % self.get_build_params(props) def get_clean_command(self, props): return "%s clean %s" % self.get_build_params(props) def get_rebuild_command(self, props): return "%s clean build %s" % self.get_build_params(props) def get_filter_name(self, node): lst = diff(node, self.tg.path) return '\\'.join(lst) or '.' class vsnode_alias(vsnode_project): def __init__(self, ctx, node, name): vsnode_project.__init__(self, ctx, node) self.name = name self.output_file = '' class vsnode_build_all(vsnode_alias): """ Fake target used to emulate the behaviour of "make all" (starting one process by target is slow) This is the only alias enabled by default """ def __init__(self, ctx, node, name='build_all_projects'): vsnode_alias.__init__(self, ctx, node, name) self.is_active = True class vsnode_install_all(vsnode_alias): """ Fake target used to emulate the behaviour of "make install" """ def __init__(self, ctx, node, name='install_all_projects'): vsnode_alias.__init__(self, ctx, node, name) def get_build_command(self, props): return "%s build install %s" % self.get_build_params(props) def get_clean_command(self, props): return "%s clean %s" % self.get_build_params(props) def get_rebuild_command(self, props): return "%s clean build install %s" % self.get_build_params(props) class vsnode_project_view(vsnode_alias): """ Fake target used to emulate a file system view """ def __init__(self, ctx, node, name='project_view'): vsnode_alias.__init__(self, ctx, node, name) self.tg = self.ctx() # fake one, cannot remove self.exclude_files = Node.exclude_regs + ''' waf-2* waf3-2*/** .waf-2* .waf3-2*/** **/*.sdf **/*.suo **/*.ncb **/%s ''' % Options.lockfile def collect_source(self): # this is likely to be slow self.source = self.ctx.srcnode.ant_glob('**', excl=self.exclude_files) def get_build_command(self, props): params = self.get_build_params(props) + (self.ctx.cmd,) return "%s %s %s" % params def get_clean_command(self, props): return "" def get_rebuild_command(self, props): return self.get_build_command(props) class vsnode_target(vsnode_project): """ Visual studio project representing a targets (programs, libraries, etc) and bound to a task generator """ def __init__(self, ctx, tg): """ A project is more or less equivalent to a file/folder """ base = getattr(ctx, 'projects_dir', None) or tg.path node = base.make_node(quote(tg.name) + ctx.project_extension) # the project file as a Node vsnode_project.__init__(self, ctx, node) self.name = quote(tg.name) self.tg = tg # task generator def get_build_params(self, props): """ Override the default to add the target name """ opt = '--execsolution=%s' % self.ctx.get_solution_node().win32path() if getattr(self, 'tg', None): opt += " --targets=%s" % self.tg.name return (self.get_waf(), opt) def collect_source(self): tg = self.tg source_files = tg.to_nodes(getattr(tg, 'source', [])) include_dirs = Utils.to_list(getattr(tg, 'msvs_includes', [])) include_files = [] for x in include_dirs: if isinstance(x, str): x = tg.path.find_node(x) if x: lst = [y for y in x.ant_glob(HEADERS_GLOB, flat=False)] include_files.extend(lst) # remove duplicates self.source.extend(list(set(source_files + include_files))) 
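		# sort to keep the generated project stable: set() iteration order varies
		# between runs, and an unstable file list would make stealth_write rewrite
		# an otherwise unchanged .vcxproj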
self.source.sort(key=lambda x: x.win32path()) def collect_properties(self): """ Visual studio projects are associated with platforms and configurations (for building especially) """ super(vsnode_target, self).collect_properties() for x in self.build_properties: x.outdir = self.path.parent.win32path() x.preprocessor_definitions = '' x.includes_search_path = '' try: tsk = self.tg.link_task except AttributeError: pass else: x.output_file = tsk.outputs[0].win32path() x.preprocessor_definitions = ';'.join(tsk.env.DEFINES) x.includes_search_path = ';'.join(self.tg.env.INCPATHS) class msvs_generator(BuildContext): '''generates a visual studio 2010 solution''' cmd = 'msvs' fun = 'build' numver = '11.00' # Visual Studio Version Number vsver = '2010' # Visual Studio Version Year platform_toolset_ver = 'v110' # Platform Toolset Version Number def init(self): """ Some data that needs to be present """ if not getattr(self, 'configurations', None): self.configurations = ['Release'] # LocalRelease, RemoteDebug, etc if not getattr(self, 'platforms', None): self.platforms = ['Win32'] if not getattr(self, 'all_projects', None): self.all_projects = [] if not getattr(self, 'project_extension', None): self.project_extension = '.vcxproj' if not getattr(self, 'projects_dir', None): self.projects_dir = self.srcnode.make_node('.depproj') self.projects_dir.mkdir() # bind the classes to the object, so that subclass can provide custom generators if not getattr(self, 'vsnode_vsdir', None): self.vsnode_vsdir = vsnode_vsdir if not getattr(self, 'vsnode_target', None): self.vsnode_target = vsnode_target if not getattr(self, 'vsnode_build_all', None): self.vsnode_build_all = vsnode_build_all if not getattr(self, 'vsnode_install_all', None): self.vsnode_install_all = vsnode_install_all if not getattr(self, 'vsnode_project_view', None): self.vsnode_project_view = vsnode_project_view self.numver = self.__class__.numver self.vsver = self.__class__.vsver self.platform_toolset_ver = self.__class__.platform_toolset_ver def execute(self): """ Entry point """ self.restore() if not self.all_envs: self.load_envs() self.recurse([self.run_dir]) # user initialization self.init() # two phases for creating the solution self.collect_projects() # add project objects into "self.all_projects" self.write_files() # write the corresponding project and solution files def collect_projects(self): """ Fill the list self.all_projects with project objects Fill the list of build targets """ self.collect_targets() self.add_aliases() self.collect_dirs() default_project = getattr(self, 'default_project', None) def sortfun(x): if x.name == default_project: return '' return getattr(x, 'path', None) and x.path.win32path() or x.name self.all_projects.sort(key=sortfun) def write_files(self): """ Write the project and solution files from the data collected so far. 
It is unlikely that you will want to change this """ for p in self.all_projects: p.write() # and finally write the solution file node = self.get_solution_node() node.parent.mkdir() Logs.warn('Creating %r', node) template1 = compile_template(SOLUTION_TEMPLATE) sln_str = template1(self) sln_str = rm_blank_lines(sln_str) node.stealth_write(sln_str) def get_solution_node(self): """ The solution filename is required when writing the .vcproj files return self.solution_node and if it does not exist, make one """ try: return self.solution_node except AttributeError: pass solution_name = getattr(self, 'solution_name', None) if not solution_name: solution_name = getattr(Context.g_module, Context.APPNAME, 'project') + '.sln' if os.path.isabs(solution_name): self.solution_node = self.root.make_node(solution_name) else: self.solution_node = self.srcnode.make_node(solution_name) return self.solution_node def project_configurations(self): """ Helper that returns all the pairs (config,platform) """ ret = [] for c in self.configurations: for p in self.platforms: ret.append((c, p)) return ret def collect_targets(self): """ Process the list of task generators """ for g in self.groups: for tg in g: if not isinstance(tg, TaskGen.task_gen): continue if not hasattr(tg, 'msvs_includes'): tg.msvs_includes = tg.to_list(getattr(tg, 'includes', [])) + tg.to_list(getattr(tg, 'export_includes', [])) tg.post() if not getattr(tg, 'link_task', None): continue p = self.vsnode_target(self, tg) p.collect_source() # delegate this processing p.collect_properties() self.all_projects.append(p) def add_aliases(self): """ Add a specific target that emulates the "make all" necessary for Visual studio when pressing F7 We also add an alias for "make install" (disabled by default) """ base = getattr(self, 'projects_dir', None) or self.tg.path node_project = base.make_node('build_all_projects' + self.project_extension) # Node p_build = self.vsnode_build_all(self, node_project) p_build.collect_properties() self.all_projects.append(p_build) node_project = base.make_node('install_all_projects' + self.project_extension) # Node p_install = self.vsnode_install_all(self, node_project) p_install.collect_properties() self.all_projects.append(p_install) node_project = base.make_node('project_view' + self.project_extension) # Node p_view = self.vsnode_project_view(self, node_project) p_view.collect_source() p_view.collect_properties() self.all_projects.append(p_view) n = self.vsnode_vsdir(self, make_uuid(self.srcnode.win32path() + 'build_aliases'), "build_aliases") p_build.parent = p_install.parent = p_view.parent = n self.all_projects.append(n) def collect_dirs(self): """ Create the folder structure in the Visual studio project view """ seen = {} def make_parents(proj): # look at a project, try to make a parent if getattr(proj, 'parent', None): # aliases already have parents return x = proj.iter_path if x in seen: proj.parent = seen[x] return # There is not vsnode_vsdir for x. 
# So create a project representing the folder "x" n = proj.parent = seen[x] = self.vsnode_vsdir(self, make_uuid(x.win32path()), x.name) n.iter_path = x.parent self.all_projects.append(n) # recurse up to the project directory if x.height() > self.srcnode.height() + 1: make_parents(n) for p in self.all_projects[:]: # iterate over a copy of all projects if not getattr(p, 'tg', None): # but only projects that have a task generator continue # make a folder for each task generator p.iter_path = p.tg.path make_parents(p) def wrap_2008(cls): class dec(cls): def __init__(self, *k, **kw): cls.__init__(self, *k, **kw) self.project_template = PROJECT_2008_TEMPLATE def display_filter(self): root = build_property() root.subfilters = [] root.sourcefiles = [] root.source = [] root.name = '' @Utils.run_once def add_path(lst): if not lst: return root child = build_property() child.subfilters = [] child.sourcefiles = [] child.source = [] child.name = lst[-1] par = add_path(lst[:-1]) par.subfilters.append(child) return child for x in self.source: # this crap is for enabling subclasses to override get_filter_name tmp = self.get_filter_name(x.parent) tmp = tmp != '.' and tuple(tmp.split('\\')) or () par = add_path(tmp) par.source.append(x) def display(n): buf = [] for x in n.source: buf.append('\n' % (xml_escape(x.win32path()), self.get_key(x))) for x in n.subfilters: buf.append('' % xml_escape(x.name)) buf.append(display(x)) buf.append('') return '\n'.join(buf) return display(root) def get_key(self, node): """ If you do not want to let visual studio use the default file extensions, override this method to return a value: 0: C/C++ Code, 1: C++ Class, 2: C++ Header File, 3: C++ Form, 4: C++ Control, 5: Text File, 6: DEF File, 7: IDL File, 8: Makefile, 9: RGS File, 10: RC File, 11: RES File, 12: XSD File, 13: XML File, 14: HTML File, 15: CSS File, 16: Bitmap, 17: Icon, 18: Resx File, 19: BSC File, 20: XSX File, 21: C++ Web Service, 22: ASAX File, 23: Asp Page, 24: Document, 25: Discovery File, 26: C# File, 27: eFileTypeClassDiagram, 28: MHTML Document, 29: Property Sheet, 30: Cursor, 31: Manifest, 32: eFileTypeRDLC """ return '' def write(self): Logs.debug('msvs: creating %r', self.path) template1 = compile_template(self.project_template) proj_str = template1(self) proj_str = rm_blank_lines(proj_str) self.path.stealth_write(proj_str) return dec class msvs_2008_generator(msvs_generator): '''generates a visual studio 2008 solution''' cmd = 'msvs2008' fun = msvs_generator.fun numver = '10.00' vsver = '2008' def init(self): if not getattr(self, 'project_extension', None): self.project_extension = '_2008.vcproj' if not getattr(self, 'solution_name', None): self.solution_name = getattr(Context.g_module, Context.APPNAME, 'project') + '_2008.sln' if not getattr(self, 'vsnode_target', None): self.vsnode_target = wrap_2008(vsnode_target) if not getattr(self, 'vsnode_build_all', None): self.vsnode_build_all = wrap_2008(vsnode_build_all) if not getattr(self, 'vsnode_install_all', None): self.vsnode_install_all = wrap_2008(vsnode_install_all) if not getattr(self, 'vsnode_project_view', None): self.vsnode_project_view = wrap_2008(vsnode_project_view) msvs_generator.init(self) def options(ctx): """ If the msvs option is used, try to detect if the build is made from visual studio """ ctx.add_option('--execsolution', action='store', help='when building with visual studio, use a build state file') old = BuildContext.execute def override_build_state(ctx): def lock(rm, add): uns = ctx.options.execsolution.replace('.sln', rm) uns = 
ctx.root.make_node(uns) try: uns.delete() except OSError: pass uns = ctx.options.execsolution.replace('.sln', add) uns = ctx.root.make_node(uns) try: uns.write('') except EnvironmentError: pass if ctx.options.execsolution: ctx.launch_dir = Context.top_dir # force a build for the whole project (invalid cwd when called by visual studio) lock('.lastbuildstate', '.unsuccessfulbuild') old(ctx) lock('.unsuccessfulbuild', '.lastbuildstate') else: old(ctx) BuildContext.execute = override_build_state tdb-1.4.2/third_party/waf/waflib/extras/netcache_client.py0000660000000000000000000002160713444661622023603 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2011-2015 (ita) """ A client for the network cache (playground/netcache/). Launch the server with: ./netcache_server, then use it for the builds by adding the following: def build(bld): bld.load('netcache_client') The parameters should be present in the environment in the form: NETCACHE=host:port waf configure build Or in a more detailed way: NETCACHE_PUSH=host:port NETCACHE_PULL=host:port waf configure build where: host: host where the server resides, by default localhost port: by default push on 11001 and pull on 12001 Use the server provided in playground/netcache/Netcache.java """ import os, socket, time, atexit, sys from waflib import Task, Logs, Utils, Build, Runner from waflib.Configure import conf BUF = 8192 * 16 HEADER_SIZE = 128 MODES = ['PUSH', 'PULL', 'PUSH_PULL'] STALE_TIME = 30 # seconds GET = 'GET' PUT = 'PUT' LST = 'LST' BYE = 'BYE' all_sigs_in_cache = (0.0, []) def put_data(conn, data): if sys.hexversion > 0x3000000: data = data.encode('latin-1') cnt = 0 while cnt < len(data): sent = conn.send(data[cnt:]) if sent == 0: raise RuntimeError('connection ended') cnt += sent push_connections = Runner.Queue(0) pull_connections = Runner.Queue(0) def get_connection(push=False): # return a new connection... do not forget to release it! 
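	# connections are pooled in the Runner.Queue objects above: reuse an idle
	# one when available, otherwise open a new socket to Task.push_addr or
	# Task.pull_addr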
try: if push: ret = push_connections.get(block=False) else: ret = pull_connections.get(block=False) except Exception: ret = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if push: ret.connect(Task.push_addr) else: ret.connect(Task.pull_addr) return ret def release_connection(conn, msg='', push=False): if conn: if push: push_connections.put(conn) else: pull_connections.put(conn) def close_connection(conn, msg=''): if conn: data = '%s,%s' % (BYE, msg) try: put_data(conn, data.ljust(HEADER_SIZE)) except: pass try: conn.close() except: pass def close_all(): for q in (push_connections, pull_connections): while q.qsize(): conn = q.get() try: close_connection(conn) except: # ignore errors when cleaning up pass atexit.register(close_all) def read_header(conn): cnt = 0 buf = [] while cnt < HEADER_SIZE: data = conn.recv(HEADER_SIZE - cnt) if not data: #import traceback #traceback.print_stack() raise ValueError('connection ended when reading a header %r' % buf) buf.append(data) cnt += len(data) if sys.hexversion > 0x3000000: ret = ''.encode('latin-1').join(buf) ret = ret.decode('latin-1') else: ret = ''.join(buf) return ret def check_cache(conn, ssig): """ List the files on the server, this is an optimization because it assumes that concurrent builds are rare """ global all_sigs_in_cache if not STALE_TIME: return if time.time() - all_sigs_in_cache[0] > STALE_TIME: params = (LST,'') put_data(conn, ','.join(params).ljust(HEADER_SIZE)) # read what is coming back ret = read_header(conn) size = int(ret.split(',')[0]) buf = [] cnt = 0 while cnt < size: data = conn.recv(min(BUF, size-cnt)) if not data: raise ValueError('connection ended %r %r' % (cnt, size)) buf.append(data) cnt += len(data) if sys.hexversion > 0x3000000: ret = ''.encode('latin-1').join(buf) ret = ret.decode('latin-1') else: ret = ''.join(buf) all_sigs_in_cache = (time.time(), ret.splitlines()) Logs.debug('netcache: server cache has %r entries', len(all_sigs_in_cache[1])) if not ssig in all_sigs_in_cache[1]: raise ValueError('no file %s in cache' % ssig) class MissingFile(Exception): pass def recv_file(conn, ssig, count, p): check_cache(conn, ssig) params = (GET, ssig, str(count)) put_data(conn, ','.join(params).ljust(HEADER_SIZE)) data = read_header(conn) size = int(data.split(',')[0]) if size == -1: raise MissingFile('no file %s - %s in cache' % (ssig, count)) # get the file, writing immediately # TODO a tmp file would be better f = open(p, 'wb') cnt = 0 while cnt < size: data = conn.recv(min(BUF, size-cnt)) if not data: raise ValueError('connection ended %r %r' % (cnt, size)) f.write(data) cnt += len(data) f.close() def sock_send(conn, ssig, cnt, p): #print "pushing %r %r %r" % (ssig, cnt, p) size = os.stat(p).st_size params = (PUT, ssig, str(cnt), str(size)) put_data(conn, ','.join(params).ljust(HEADER_SIZE)) f = open(p, 'rb') cnt = 0 while cnt < size: r = f.read(min(BUF, size-cnt)) while r: k = conn.send(r) if not k: raise ValueError('connection ended') cnt += k r = r[k:] def can_retrieve_cache(self): if not Task.pull_addr: return False if not self.outputs: return False self.cached = False cnt = 0 sig = self.signature() ssig = Utils.to_hex(self.uid() + sig) conn = None err = False try: try: conn = get_connection() for node in self.outputs: p = node.abspath() recv_file(conn, ssig, cnt, p) cnt += 1 except MissingFile as e: Logs.debug('netcache: file is not in the cache %r', e) err = True except Exception as e: Logs.debug('netcache: could not get the files %r', self.outputs) if Logs.verbose > 1: Logs.debug('netcache: exception %r', e) 
			err = True

			# broken connection? remove this one
			close_connection(conn)
			conn = None
		else:
			Logs.debug('netcache: obtained %r from cache', self.outputs)
	finally:
		release_connection(conn)
	if err:
		return False

	self.cached = True
	return True

@Utils.run_once
def put_files_cache(self):
	if not Task.push_addr:
		return
	if not self.outputs:
		return
	if getattr(self, 'cached', None):
		return

	#print "called put_files_cache", id(self)
	bld = self.generator.bld
	sig = self.signature()
	ssig = Utils.to_hex(self.uid() + sig)

	conn = None
	cnt = 0
	try:
		for node in self.outputs:
			# We could re-create the signature of the task with the signature of the outputs
			# in practice, this means hashing the output files
			# this is unnecessary
			try:
				if not conn:
					conn = get_connection(push=True)
				sock_send(conn, ssig, cnt, node.abspath())
				Logs.debug('netcache: sent %r', node)
			except Exception as e:
				Logs.debug('netcache: could not push the files %r', e)

				# broken connection? remove this one
				close_connection(conn)
				conn = None
			cnt += 1
	finally:
		release_connection(conn, push=True)

	bld.task_sigs[self.uid()] = self.cache_sig

def hash_env_vars(self, env, vars_lst):
	# reimplement so that the resulting hash does not depend on local paths
	if not env.table:
		env = env.parent
		if not env:
			return Utils.SIG_NIL

	idx = str(id(env)) + str(vars_lst)
	try:
		cache = self.cache_env
	except AttributeError:
		cache = self.cache_env = {}
	else:
		try:
			return self.cache_env[idx]
		except KeyError:
			pass

	v = str([env[a] for a in vars_lst])
	v = v.replace(self.srcnode.abspath().__repr__()[:-1], '')
	m = Utils.md5()
	m.update(v.encode())
	ret = m.digest()

	Logs.debug('envhash: %r %r', ret, v)

	cache[idx] = ret
	return ret

def uid(self):
	# reimplement so that the signature does not depend on local paths
	try:
		return self.uid_
	except AttributeError:
		m = Utils.md5()
		src = self.generator.bld.srcnode
		up = m.update
		up(self.__class__.__name__.encode())
		for x in self.inputs + self.outputs:
			up(x.path_from(src).encode())
		self.uid_ = m.digest()
		return self.uid_

def make_cached(cls):
	if getattr(cls, 'nocache', None):
		return

	m1 = cls.run
	def run(self):
		if getattr(self, 'nocache', False):
			return m1(self)
		if self.can_retrieve_cache():
			return 0
		return m1(self)
	cls.run = run

	m2 = cls.post_run
	def post_run(self):
		if getattr(self, 'nocache', False):
			return m2(self)
		bld = self.generator.bld
		ret = m2(self)
		if bld.cache_global:
			self.put_files_cache()
		if hasattr(self, 'chmod'):
			for node in self.outputs:
				os.chmod(node.abspath(), self.chmod)
		return ret
	cls.post_run = post_run

@conf
def setup_netcache(ctx, push_addr, pull_addr):
	Task.Task.can_retrieve_cache = can_retrieve_cache
	Task.Task.put_files_cache = put_files_cache
	Task.Task.uid = uid
	Task.push_addr = push_addr
	Task.pull_addr = pull_addr
	Build.BuildContext.hash_env_vars = hash_env_vars
	ctx.cache_global = True

	for x in Task.classes.values():
		make_cached(x)

def build(bld):
	if not 'NETCACHE' in os.environ and not 'NETCACHE_PULL' in os.environ and not 'NETCACHE_PUSH' in os.environ:
		Logs.warn('Setting NETCACHE_PULL=127.0.0.1:12001 and NETCACHE_PUSH=127.0.0.1:11001')
		os.environ['NETCACHE_PULL'] = '127.0.0.1:12001'
		os.environ['NETCACHE_PUSH'] = '127.0.0.1:11001'

	if 'NETCACHE' in os.environ:
		if not 'NETCACHE_PUSH' in os.environ:
			os.environ['NETCACHE_PUSH'] = os.environ['NETCACHE']
		if not 'NETCACHE_PULL' in os.environ:
			os.environ['NETCACHE_PULL'] = os.environ['NETCACHE']

	v = os.environ['NETCACHE_PULL']
	if v:
		h, p = v.split(':')
		pull_addr = (h, int(p))
	else:
		pull_addr = None

	v = os.environ['NETCACHE_PUSH']
	if v:
		h, p = v.split(':')
		push_addr = (h, int(p))
	else:
push_addr = None setup_netcache(bld, push_addr, pull_addr) tdb-1.4.2/third_party/waf/waflib/extras/objcopy.py0000660000000000000000000000326013444661622022133 0ustar rootroot00000000000000#!/usr/bin/python # Grygoriy Fuchedzhy 2010 """ Support for converting linked targets to ihex, srec or binary files using objcopy. Use the 'objcopy' feature in conjunction with the 'cc' or 'cxx' feature. The 'objcopy' feature uses the following attributes: objcopy_bfdname Target object format name (eg. ihex, srec, binary). Defaults to ihex. objcopy_target File name used for objcopy output. This defaults to the target name with objcopy_bfdname as extension. objcopy_install_path Install path for objcopy_target file. Defaults to ${PREFIX}/fw. objcopy_flags Additional flags passed to objcopy. """ from waflib.Utils import def_attrs from waflib import Task from waflib.TaskGen import feature, after_method class objcopy(Task.Task): run_str = '${OBJCOPY} -O ${TARGET_BFDNAME} ${OBJCOPYFLAGS} ${SRC} ${TGT}' color = 'CYAN' @feature('objcopy') @after_method('apply_link') def map_objcopy(self): def_attrs(self, objcopy_bfdname = 'ihex', objcopy_target = None, objcopy_install_path = "${PREFIX}/firmware", objcopy_flags = '') link_output = self.link_task.outputs[0] if not self.objcopy_target: self.objcopy_target = link_output.change_ext('.' + self.objcopy_bfdname).name task = self.create_task('objcopy', src=link_output, tgt=self.path.find_or_declare(self.objcopy_target)) task.env.append_unique('TARGET_BFDNAME', self.objcopy_bfdname) try: task.env.append_unique('OBJCOPYFLAGS', getattr(self, 'objcopy_flags')) except AttributeError: pass if self.objcopy_install_path: self.add_install_files(install_to=self.objcopy_install_path, install_from=task.outputs[0]) def configure(ctx): ctx.find_program('objcopy', var='OBJCOPY', mandatory=True) tdb-1.4.2/third_party/waf/waflib/extras/ocaml.py0000660000000000000000000002247013527011455021560 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2010 (ita) "ocaml support" import os, re from waflib import Utils, Task from waflib.Logs import error from waflib.TaskGen import feature, before_method, after_method, extension EXT_MLL = ['.mll'] EXT_MLY = ['.mly'] EXT_MLI = ['.mli'] EXT_MLC = ['.c'] EXT_ML = ['.ml'] open_re = re.compile(r'^\s*open\s+([a-zA-Z]+)(;;){0,1}$', re.M) foo = re.compile(r"""(\(\*)|(\*\))|("(\\.|[^"\\])*"|'(\\.|[^'\\])*'|.[^()*"'\\]*)""", re.M) def filter_comments(txt): meh = [0] def repl(m): if m.group(1): meh[0] += 1 elif m.group(2): meh[0] -= 1 elif not meh[0]: return m.group() return '' return foo.sub(repl, txt) def scan(self): node = self.inputs[0] code = filter_comments(node.read()) global open_re names = [] import_iterator = open_re.finditer(code) if import_iterator: for import_match in import_iterator: names.append(import_match.group(1)) found_lst = [] raw_lst = [] for name in names: nd = None for x in self.incpaths: nd = x.find_resource(name.lower()+'.ml') if not nd: nd = x.find_resource(name+'.ml') if nd: found_lst.append(nd) break else: raw_lst.append(name) return (found_lst, raw_lst) native_lst=['native', 'all', 'c_object'] bytecode_lst=['bytecode', 'all'] @feature('ocaml') def init_ml(self): Utils.def_attrs(self, type = 'all', incpaths_lst = [], bld_incpaths_lst = [], mlltasks = [], mlytasks = [], mlitasks = [], native_tasks = [], bytecode_tasks = [], linktasks = [], bytecode_env = None, native_env = None, compiled_tasks = [], includes = '', uselib = '', are_deps_set = 0) @feature('ocaml') @after_method('init_ml') def 
init_envs_ml(self): self.islibrary = getattr(self, 'islibrary', False) global native_lst, bytecode_lst self.native_env = None if self.type in native_lst: self.native_env = self.env.derive() if self.islibrary: self.native_env['OCALINKFLAGS'] = '-a' self.bytecode_env = None if self.type in bytecode_lst: self.bytecode_env = self.env.derive() if self.islibrary: self.bytecode_env['OCALINKFLAGS'] = '-a' if self.type == 'c_object': self.native_env.append_unique('OCALINKFLAGS_OPT', '-output-obj') @feature('ocaml') @before_method('apply_vars_ml') @after_method('init_envs_ml') def apply_incpaths_ml(self): inc_lst = self.includes.split() lst = self.incpaths_lst for dir in inc_lst: node = self.path.find_dir(dir) if not node: error("node not found: " + str(dir)) continue if not node in lst: lst.append(node) self.bld_incpaths_lst.append(node) # now the nodes are added to self.incpaths_lst @feature('ocaml') @before_method('process_source') def apply_vars_ml(self): for i in self.incpaths_lst: if self.bytecode_env: app = self.bytecode_env.append_value app('OCAMLPATH', ['-I', i.bldpath(), '-I', i.srcpath()]) if self.native_env: app = self.native_env.append_value app('OCAMLPATH', ['-I', i.bldpath(), '-I', i.srcpath()]) varnames = ['INCLUDES', 'OCAMLFLAGS', 'OCALINKFLAGS', 'OCALINKFLAGS_OPT'] for name in self.uselib.split(): for vname in varnames: cnt = self.env[vname+'_'+name] if cnt: if self.bytecode_env: self.bytecode_env.append_value(vname, cnt) if self.native_env: self.native_env.append_value(vname, cnt) @feature('ocaml') @after_method('process_source') def apply_link_ml(self): if self.bytecode_env: ext = self.islibrary and '.cma' or '.run' linktask = self.create_task('ocalink') linktask.bytecode = 1 linktask.set_outputs(self.path.find_or_declare(self.target + ext)) linktask.env = self.bytecode_env self.linktasks.append(linktask) if self.native_env: if self.type == 'c_object': ext = '.o' elif self.islibrary: ext = '.cmxa' else: ext = '' linktask = self.create_task('ocalinkx') linktask.set_outputs(self.path.find_or_declare(self.target + ext)) linktask.env = self.native_env self.linktasks.append(linktask) # we produce a .o file to be used by gcc self.compiled_tasks.append(linktask) @extension(*EXT_MLL) def mll_hook(self, node): mll_task = self.create_task('ocamllex', node, node.change_ext('.ml')) mll_task.env = self.native_env.derive() self.mlltasks.append(mll_task) self.source.append(mll_task.outputs[0]) @extension(*EXT_MLY) def mly_hook(self, node): mly_task = self.create_task('ocamlyacc', node, [node.change_ext('.ml'), node.change_ext('.mli')]) mly_task.env = self.native_env.derive() self.mlytasks.append(mly_task) self.source.append(mly_task.outputs[0]) task = self.create_task('ocamlcmi', mly_task.outputs[1], mly_task.outputs[1].change_ext('.cmi')) task.env = self.native_env.derive() @extension(*EXT_MLI) def mli_hook(self, node): task = self.create_task('ocamlcmi', node, node.change_ext('.cmi')) task.env = self.native_env.derive() self.mlitasks.append(task) @extension(*EXT_MLC) def mlc_hook(self, node): task = self.create_task('ocamlcc', node, node.change_ext('.o')) task.env = self.native_env.derive() self.compiled_tasks.append(task) @extension(*EXT_ML) def ml_hook(self, node): if self.native_env: task = self.create_task('ocamlx', node, node.change_ext('.cmx')) task.env = self.native_env.derive() task.incpaths = self.bld_incpaths_lst self.native_tasks.append(task) if self.bytecode_env: task = self.create_task('ocaml', node, node.change_ext('.cmo')) task.env = self.bytecode_env.derive() task.bytecode = 1 
task.incpaths = self.bld_incpaths_lst self.bytecode_tasks.append(task) def compile_may_start(self): if not getattr(self, 'flag_deps', ''): self.flag_deps = 1 # the evil part is that we can only compute the dependencies after the # source files can be read (this means actually producing the source files) if getattr(self, 'bytecode', ''): alltasks = self.generator.bytecode_tasks else: alltasks = self.generator.native_tasks self.signature() # ensure that files are scanned - unfortunately tree = self.generator.bld for node in self.inputs: lst = tree.node_deps[self.uid()] for depnode in lst: for t in alltasks: if t == self: continue if depnode in t.inputs: self.set_run_after(t) # TODO necessary to get the signature right - for now delattr(self, 'cache_sig') self.signature() return Task.Task.runnable_status(self) class ocamlx(Task.Task): """native caml compilation""" color = 'GREEN' run_str = '${OCAMLOPT} ${OCAMLPATH} ${OCAMLFLAGS} ${OCAMLINCLUDES} -c -o ${TGT} ${SRC}' scan = scan runnable_status = compile_may_start class ocaml(Task.Task): """bytecode caml compilation""" color = 'GREEN' run_str = '${OCAMLC} ${OCAMLPATH} ${OCAMLFLAGS} ${OCAMLINCLUDES} -c -o ${TGT} ${SRC}' scan = scan runnable_status = compile_may_start class ocamlcmi(Task.Task): """interface generator (the .i files?)""" color = 'BLUE' run_str = '${OCAMLC} ${OCAMLPATH} ${OCAMLINCLUDES} -o ${TGT} -c ${SRC}' before = ['ocamlcc', 'ocaml', 'ocamlcc'] class ocamlcc(Task.Task): """ocaml to c interfaces""" color = 'GREEN' run_str = 'cd ${TGT[0].bld_dir()} && ${OCAMLOPT} ${OCAMLFLAGS} ${OCAMLPATH} ${OCAMLINCLUDES} -c ${SRC[0].abspath()}' class ocamllex(Task.Task): """lexical generator""" color = 'BLUE' run_str = '${OCAMLLEX} ${SRC} -o ${TGT}' before = ['ocamlcmi', 'ocaml', 'ocamlcc'] class ocamlyacc(Task.Task): """parser generator""" color = 'BLUE' run_str = '${OCAMLYACC} -b ${tsk.base()} ${SRC}' before = ['ocamlcmi', 'ocaml', 'ocamlcc'] def base(self): node = self.outputs[0] s = os.path.splitext(node.name)[0] return node.bld_dir() + os.sep + s def link_may_start(self): if getattr(self, 'bytecode', 0): alltasks = self.generator.bytecode_tasks else: alltasks = self.generator.native_tasks for x in alltasks: if not x.hasrun: return Task.ASK_LATER if not getattr(self, 'order', ''): # now reorder the inputs given the task dependencies # this part is difficult, we do not have a total order on the tasks # if the dependencies are wrong, this may not stop seen = [] pendant = []+alltasks while pendant: task = pendant.pop(0) if task in seen: continue for x in task.run_after: if not x in seen: pendant.append(task) break else: seen.append(task) self.inputs = [x.outputs[0] for x in seen] self.order = 1 return Task.Task.runnable_status(self) class ocalink(Task.Task): """bytecode caml link""" color = 'YELLOW' run_str = '${OCAMLC} -o ${TGT} ${OCAMLINCLUDES} ${OCALINKFLAGS} ${SRC}' runnable_status = link_may_start after = ['ocaml', 'ocamlcc'] class ocalinkx(Task.Task): """native caml link""" color = 'YELLOW' run_str = '${OCAMLOPT} -o ${TGT} ${OCAMLINCLUDES} ${OCALINKFLAGS_OPT} ${SRC}' runnable_status = link_may_start after = ['ocamlx', 'ocamlcc'] def configure(conf): opt = conf.find_program('ocamlopt', var='OCAMLOPT', mandatory=False) occ = conf.find_program('ocamlc', var='OCAMLC', mandatory=False) if (not opt) or (not occ): conf.fatal('The objective caml compiler was not found:\ninstall it or make it available in your PATH') v = conf.env v['OCAMLC'] = occ v['OCAMLOPT'] = opt v['OCAMLLEX'] = conf.find_program('ocamllex', var='OCAMLLEX', mandatory=False) 
v['OCAMLYACC'] = conf.find_program('ocamlyacc', var='OCAMLYACC', mandatory=False) v['OCAMLFLAGS'] = '' where = conf.cmd_and_log(conf.env.OCAMLC + ['-where']).strip()+os.sep v['OCAMLLIB'] = where v['LIBPATH_OCAML'] = where v['INCLUDES_OCAML'] = where v['LIB_OCAML'] = 'camlrun' tdb-1.4.2/third_party/waf/waflib/extras/package.py0000660000000000000000000000306613444661622022065 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2011 """ Obtain packages, unpack them in a location, and add associated uselib variables (CFLAGS_pkgname, LIBPATH_pkgname, etc). The default is to use a Dependencies.txt file in the source directory. This is a work in progress. Usage: def options(opt): opt.load('package') def configure(conf): conf.load_packages() """ from waflib import Logs from waflib.Configure import conf try: from urllib import request except ImportError: from urllib import urlopen else: urlopen = request.urlopen CACHEVAR = 'WAFCACHE_PACKAGE' @conf def get_package_cache_dir(self): cache = None if CACHEVAR in self.environ: # use self.environ: 'conf' in this scope is the decorator imported above, not the context cache = self.environ[CACHEVAR] cache = self.root.make_node(cache) elif self.env[CACHEVAR]: cache = self.env[CACHEVAR] cache = self.root.make_node(cache) else: cache = self.srcnode.make_node('.wafcache_package') cache.mkdir() return cache @conf def download_archive(self, src, dst): for x in self.env.PACKAGE_REPO: url = '/'.join((x, src)) try: web = urlopen(url) try: if web.getcode() != 200: continue except AttributeError: pass except Exception: # on python3 urlopen throws an exception # python 2.3 does not have getcode and throws an exception to fail continue else: tmp = self.root.make_node(dst) tmp.write(web.read()) Logs.warn('Downloaded %s from %s', tmp.abspath(), url) break else: self.fatal('Could not get the package %s' % src) @conf def load_packages(self): self.get_package_cache_dir() # read the dependencies, get the archives, .. tdb-1.4.2/third_party/waf/waflib/extras/parallel_debug.py0000660000000000000000000002744313527011455023434 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2007-2010 (ita) """ Debugging helper for parallel compilation. Copy it to your project and load it with:: def options(opt): opt.load('parallel_debug', tooldir='.') def build(bld): ... The build will then output a file named pdebug.svg in the source directory.
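The diagram can be tuned through the command-line options registered in ``options()`` at the end of this file; a typical session (illustrative values, not from the original documentation)::

	$ waf configure
	$ waf build --dtitle="nightly build" --dwidth=1000 --dband=20

A plain ``pdebug.dat`` file is also written next to the SVG, with one line per scheduling event (roughly: slot, task id, time offset, task class, scheduler counters and output files); it can be fed to external plotting tools.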
""" import re, sys, threading, time, traceback try: from Queue import Queue except: from queue import Queue from waflib import Runner, Options, Task, Logs, Errors SVG_TEMPLATE = """ ${if project.title} ${project.title} ${endif} ${for cls in project.groups} ${for rect in cls.rects} ${endfor} ${endfor} ${for info in project.infos} ${info.text} ${endfor} ${if project.tooltip} ${endif} """ COMPILE_TEMPLATE = '''def f(project): lst = [] def xml_escape(value): return value.replace("&", "&").replace('"', """).replace("'", "'").replace("<", "<").replace(">", ">") %s return ''.join(lst) ''' reg_act = re.compile(r"(?P\\)|(?P\$\$)|(?P\$\{(?P[^}]*?)\})", re.M) def compile_template(line): extr = [] def repl(match): g = match.group if g('dollar'): return "$" elif g('backslash'): return "\\" elif g('subst'): extr.append(g('code')) return "<<|@|>>" return None line2 = reg_act.sub(repl, line) params = line2.split('<<|@|>>') assert(extr) indent = 0 buf = [] app = buf.append def app(txt): buf.append(indent * '\t' + txt) for x in range(len(extr)): if params[x]: app("lst.append(%r)" % params[x]) f = extr[x] if f.startswith(('if', 'for')): app(f + ':') indent += 1 elif f.startswith('py:'): app(f[3:]) elif f.startswith(('endif', 'endfor')): indent -= 1 elif f.startswith(('else', 'elif')): indent -= 1 app(f + ':') indent += 1 elif f.startswith('xml:'): app('lst.append(xml_escape(%s))' % f[4:]) else: #app('lst.append((%s) or "cannot find %s")' % (f, f)) app('lst.append(str(%s))' % f) if extr: if params[-1]: app("lst.append(%r)" % params[-1]) fun = COMPILE_TEMPLATE % "\n\t".join(buf) # uncomment the following to debug the template #for i, x in enumerate(fun.splitlines()): # print i, x return Task.funex(fun) # red #ff4d4d # green #4da74d # lila #a751ff color2code = { 'GREEN' : '#4da74d', 'YELLOW' : '#fefe44', 'PINK' : '#a751ff', 'RED' : '#cc1d1d', 'BLUE' : '#6687bb', 'CYAN' : '#34e2e2', } mp = {} info = [] # list of (text,color) def map_to_color(name): if name in mp: return mp[name] try: cls = Task.classes[name] except KeyError: return color2code['RED'] if cls.color in mp: return mp[cls.color] if cls.color in color2code: return color2code[cls.color] return color2code['RED'] def process(self): m = self.generator.bld.producer try: # TODO another place for this? 
del self.generator.bld.task_sigs[self.uid()] except KeyError: pass self.generator.bld.producer.set_running(1, self) try: ret = self.run() except Exception: self.err_msg = traceback.format_exc() self.hasrun = Task.EXCEPTION # TODO cleanup m.error_handler(self) return if ret: self.err_code = ret self.hasrun = Task.CRASHED else: try: self.post_run() except Errors.WafError: pass except Exception: self.err_msg = traceback.format_exc() self.hasrun = Task.EXCEPTION else: self.hasrun = Task.SUCCESS if self.hasrun != Task.SUCCESS: m.error_handler(self) self.generator.bld.producer.set_running(-1, self) Task.Task.process_back = Task.Task.process Task.Task.process = process old_start = Runner.Parallel.start def do_start(self): try: Options.options.dband except AttributeError: self.bld.fatal('use def options(opt): opt.load("parallel_debug")!') self.taskinfo = Queue() old_start(self) if self.dirty: make_picture(self) Runner.Parallel.start = do_start lock_running = threading.Lock() def set_running(self, by, tsk): with lock_running: try: cache = self.lock_cache except AttributeError: cache = self.lock_cache = {} i = 0 if by > 0: vals = cache.values() for i in range(self.numjobs): if i not in vals: cache[tsk] = i break else: i = cache[tsk] del cache[tsk] self.taskinfo.put( (i, id(tsk), time.time(), tsk.__class__.__name__, self.processed, self.count, by, ",".join(map(str, tsk.outputs))) ) Runner.Parallel.set_running = set_running def name2class(name): return name.replace(' ', '_').replace('.', '_') def make_picture(producer): # first, cast the parameters if not hasattr(producer.bld, 'path'): return tmp = [] try: while True: tup = producer.taskinfo.get(False) tmp.append(list(tup)) except: pass try: ini = float(tmp[0][2]) except: return if not info: seen = [] for x in tmp: name = x[3] if not name in seen: seen.append(name) else: continue info.append((name, map_to_color(name))) info.sort(key=lambda x: x[0]) thread_count = 0 acc = [] for x in tmp: thread_count += x[6] acc.append("%d %d %f %r %d %d %d %s" % (x[0], x[1], x[2] - ini, x[3], x[4], x[5], thread_count, x[7])) data_node = producer.bld.path.make_node('pdebug.dat') data_node.write('\n'.join(acc)) tmp = [lst[:2] + [float(lst[2]) - ini] + lst[3:] for lst in tmp] st = {} for l in tmp: if not l[0] in st: st[l[0]] = len(st.keys()) tmp = [ [st[lst[0]]] + lst[1:] for lst in tmp ] THREAD_AMOUNT = len(st.keys()) st = {} for l in tmp: if not l[1] in st: st[l[1]] = len(st.keys()) tmp = [ [lst[0]] + [st[lst[1]]] + lst[2:] for lst in tmp ] BAND = Options.options.dband seen = {} acc = [] for x in range(len(tmp)): line = tmp[x] id = line[1] if id in seen: continue seen[id] = True begin = line[2] thread_id = line[0] for y in range(x + 1, len(tmp)): line = tmp[y] if line[1] == id: end = line[2] #print id, thread_id, begin, end #acc.append( ( 10*thread_id, 10*(thread_id+1), 10*begin, 10*end ) ) acc.append( (BAND * begin, BAND*thread_id, BAND*end - BAND*begin, BAND, line[3], line[7]) ) break if Options.options.dmaxtime < 0.1: gwidth = 1 for x in tmp: m = BAND * x[2] if m > gwidth: gwidth = m else: gwidth = BAND * Options.options.dmaxtime ratio = float(Options.options.dwidth) / gwidth gwidth = Options.options.dwidth gheight = BAND * (THREAD_AMOUNT + len(info) + 1.5) # simple data model for our template class tobject(object): pass model = tobject() model.x = 0 model.y = 0 model.width = gwidth + 4 model.height = gheight + 4 model.tooltip = not Options.options.dnotooltip model.title = Options.options.dtitle model.title_x = gwidth / 2 model.title_y = gheight + - 5 groups = {} 
for (x, y, w, h, clsname, name) in acc: try: groups[clsname].append((x, y, w, h, name)) except: groups[clsname] = [(x, y, w, h, name)] # groups of rectangles (else js highlighting is slow) model.groups = [] for cls in groups: g = tobject() model.groups.append(g) g.classname = name2class(cls) g.rects = [] for (x, y, w, h, name) in groups[cls]: r = tobject() g.rects.append(r) r.x = 2 + x * ratio r.y = 2 + y r.width = w * ratio r.height = h r.name = name r.color = map_to_color(cls) cnt = THREAD_AMOUNT # caption model.infos = [] for (text, color) in info: inf = tobject() model.infos.append(inf) inf.classname = name2class(text) inf.x = 2 + BAND inf.y = 5 + (cnt + 0.5) * BAND inf.width = BAND/2 inf.height = BAND/2 inf.color = color inf.text = text inf.text_x = 2 + 2 * BAND inf.text_y = 5 + (cnt + 0.5) * BAND + 10 cnt += 1 # write the file... template1 = compile_template(SVG_TEMPLATE) txt = template1(model) node = producer.bld.path.make_node('pdebug.svg') node.write(txt) Logs.warn('Created the diagram %r', node) def options(opt): opt.add_option('--dtitle', action='store', default='Parallel build representation for %r' % ' '.join(sys.argv), help='title for the svg diagram', dest='dtitle') opt.add_option('--dwidth', action='store', type='int', help='diagram width', default=800, dest='dwidth') opt.add_option('--dtime', action='store', type='float', help='recording interval in seconds', default=0.009, dest='dtime') opt.add_option('--dband', action='store', type='int', help='band width', default=22, dest='dband') opt.add_option('--dmaxtime', action='store', type='float', help='maximum time, for drawing fair comparisons', default=0, dest='dmaxtime') opt.add_option('--dnotooltip', action='store_true', help='disable tooltips', default=False, dest='dnotooltip') tdb-1.4.2/third_party/waf/waflib/extras/pch.py0000660000000000000000000001051413444661622021240 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Alexander Afanasyev (UCLA), 2014 """ Enable precompiled C++ header support (currently only clang++ and g++ are supported) To use this tool, wscript should look like: def options(opt): opt.load('pch') # This will add a `--without-pch` configure option. # Unless --without-pch is given at configure time, precompiled header support is enabled whenever a supported compiler is found def configure(conf): conf.load('pch') # this will set conf.env.WITH_PCH if PCH support is enabled and a supported compiler is used # Unless conf.env.WITH_PCH is set, the precompiled header support is disabled def build(bld): bld(features='cxx pch', target='precompiled-headers', name='precompiled-headers', headers='a.h b.h c.h', # headers to pre-compile into `precompiled-headers` # Other parameters to compile precompiled headers # includes=..., # export_includes=..., # use=..., # ... # Exported parameters will be propagated even if precompiled headers are disabled ) bld( target='test', features='cxx cxxprogram', source='a.cpp b.cpp d.cpp main.cpp', use='precompiled-headers', ) # or bld( target='test', features='pch cxx cxxprogram', source='a.cpp b.cpp d.cpp main.cpp', headers='a.h b.h c.h', ) Note that a precompiled header must have guards against multiple inclusion. If the guards are missing, any benefit of the precompiled header will be voided and compilation may fail in some cases.
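Support is enabled by default whenever a supported compiler is detected; it can be turned off explicitly at configure time through the option registered in ``options()`` below::

	$ waf configure --without-pch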
""" import os from waflib import Task, TaskGen, Utils from waflib.Tools import c_preproc, cxx PCH_COMPILER_OPTIONS = { 'clang++': [['-include'], '.pch', ['-x', 'c++-header']], 'g++': [['-include'], '.gch', ['-x', 'c++-header']], } def options(opt): opt.add_option('--without-pch', action='store_false', default=True, dest='with_pch', help='''Try to use precompiled header to speed up compilation (only g++ and clang++)''') def configure(conf): if (conf.options.with_pch and conf.env['COMPILER_CXX'] in PCH_COMPILER_OPTIONS.keys()): conf.env.WITH_PCH = True flags = PCH_COMPILER_OPTIONS[conf.env['COMPILER_CXX']] conf.env.CXXPCH_F = flags[0] conf.env.CXXPCH_EXT = flags[1] conf.env.CXXPCH_FLAGS = flags[2] @TaskGen.feature('pch') @TaskGen.before('process_source') def apply_pch(self): if not self.env.WITH_PCH: return if getattr(self.bld, 'pch_tasks', None) is None: self.bld.pch_tasks = {} if getattr(self, 'headers', None) is None: return self.headers = self.to_nodes(self.headers) if getattr(self, 'name', None): try: task = self.bld.pch_tasks["%s.%s" % (self.name, self.idx)] self.bld.fatal("Duplicated 'pch' task with name %r" % "%s.%s" % (self.name, self.idx)) except KeyError: pass out = '%s.%d%s' % (self.target, self.idx, self.env['CXXPCH_EXT']) out = self.path.find_or_declare(out) task = self.create_task('gchx', self.headers, out) # target should be an absolute path of `out`, but without precompiled header extension task.target = out.abspath()[:-len(out.suffix())] self.pch_task = task if getattr(self, 'name', None): self.bld.pch_tasks["%s.%s" % (self.name, self.idx)] = task @TaskGen.feature('cxx') @TaskGen.after_method('process_source', 'propagate_uselib_vars') def add_pch(self): if not (self.env['WITH_PCH'] and getattr(self, 'use', None) and getattr(self, 'compiled_tasks', None) and getattr(self.bld, 'pch_tasks', None)): return pch = None # find pch task, if any if getattr(self, 'pch_task', None): pch = self.pch_task else: for use in Utils.to_list(self.use): try: pch = self.bld.pch_tasks[use] except KeyError: pass if pch: for x in self.compiled_tasks: x.env.append_value('CXXFLAGS', self.env['CXXPCH_F'] + [pch.target]) class gchx(Task.Task): run_str = '${CXX} ${ARCH_ST:ARCH} ${CXXFLAGS} ${CXXPCH_FLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${CXXPCH_F:SRC} ${CXX_SRC_F}${SRC[0].abspath()} ${CXX_TGT_F}${TGT[0].abspath()} ${CPPFLAGS}' scan = c_preproc.scan color = 'BLUE' ext_out=['.h'] def runnable_status(self): try: node_deps = self.generator.bld.node_deps[self.uid()] except KeyError: node_deps = [] ret = Task.Task.runnable_status(self) if ret == Task.SKIP_ME and self.env.CXX_NAME == 'clang': t = os.stat(self.outputs[0].abspath()).st_mtime for n in self.inputs + node_deps: if os.stat(n.abspath()).st_mtime > t: return Task.RUN_ME return ret tdb-1.4.2/third_party/waf/waflib/extras/pep8.py0000660000000000000000000000662413444661622021351 0ustar rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # # written by Sylvain Rouquette, 2011 ''' Install pep8 module: $ easy_install pep8 or $ pip install pep8 To add the pep8 tool to the waf file: $ ./waf-light --tools=compat15,pep8 or, if you have waf >= 1.6.2 $ ./waf update --files=pep8 Then add this to your wscript: [at]extension('.py', 'wscript') def run_pep8(self, node): self.create_task('Pep8', node) ''' import threading from waflib import Task, Options pep8 = __import__('pep8') class Pep8(Task.Task): color = 'PINK' lock = threading.Lock() def check_options(self): if pep8.options: return pep8.options = Options.options pep8.options.prog = 'pep8' excl = pep8.options.exclude.split(',') pep8.options.exclude = [s.rstrip('/') for s in excl] if pep8.options.filename: pep8.options.filename = pep8.options.filename.split(',') if pep8.options.select: pep8.options.select = pep8.options.select.split(',') else: pep8.options.select = [] if pep8.options.ignore: pep8.options.ignore = pep8.options.ignore.split(',') elif pep8.options.select: # Ignore all checks which are not explicitly selected pep8.options.ignore = [''] elif pep8.options.testsuite or pep8.options.doctest: # For doctest and testsuite, all checks are required pep8.options.ignore = [] else: # The default choice: ignore controversial checks pep8.options.ignore = pep8.DEFAULT_IGNORE.split(',') pep8.options.physical_checks = pep8.find_checks('physical_line') pep8.options.logical_checks = pep8.find_checks('logical_line') pep8.options.counters = dict.fromkeys(pep8.BENCHMARK_KEYS, 0) pep8.options.messages = {} def run(self): with Pep8.lock: self.check_options() pep8.input_file(self.inputs[0].abspath()) return 0 if not pep8.get_count() else -1 def options(opt): opt.add_option('-q', '--quiet', default=0, action='count', help="report only file names, or nothing with -qq") opt.add_option('-r', '--repeat', action='store_true', help="show all occurrences of the same error") opt.add_option('--exclude', metavar='patterns', default=pep8.DEFAULT_EXCLUDE, help="exclude files or directories which match these " "comma separated patterns (default: %s)" % pep8.DEFAULT_EXCLUDE, dest='exclude') opt.add_option('--filename', metavar='patterns', default='*.py', help="when parsing directories, only check filenames " "matching these comma separated patterns (default: " "*.py)") opt.add_option('--select', metavar='errors', default='', help="select errors and warnings (e.g. E,W6)") opt.add_option('--ignore', metavar='errors', default='', help="skip errors and warnings (e.g. 
E4,W)") opt.add_option('--show-source', action='store_true', help="show source code for each error") opt.add_option('--show-pep8', action='store_true', help="show text of PEP 8 for each error") opt.add_option('--statistics', action='store_true', help="count errors and warnings") opt.add_option('--count', action='store_true', help="print total number of errors and warnings " "to standard error and set exit code to 1 if " "total is not null") opt.add_option('--benchmark', action='store_true', help="measure processing speed") opt.add_option('--testsuite', metavar='dir', help="run regression tests from dir") opt.add_option('--doctest', action='store_true', help="run doctest on myself") tdb-1.4.2/third_party/waf/waflib/extras/pgicc.py0000660000000000000000000000331313527011455021545 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Antoine Dechaume 2011 """ Detect the PGI C compiler """ import sys, re from waflib import Errors from waflib.Configure import conf from waflib.Tools.compiler_c import c_compiler c_compiler['linux'].append('pgicc') @conf def find_pgi_compiler(conf, var, name): """ Find the program name, and execute it to ensure it really is itself. """ if sys.platform == 'cygwin': conf.fatal('The PGI compiler does not work on Cygwin') v = conf.env cc = None if v[var]: cc = v[var] elif var in conf.environ: cc = conf.environ[var] if not cc: cc = conf.find_program(name, var=var) if not cc: conf.fatal('PGI Compiler (%s) was not found' % name) v[var + '_VERSION'] = conf.get_pgi_version(cc) v[var] = cc v[var + '_NAME'] = 'pgi' @conf def get_pgi_version(conf, cc): """Find the version of a pgi compiler.""" version_re = re.compile(r"The Portland Group", re.I).search cmd = cc + ['-V', '-E'] # Issue 1078, prevent wrappers from linking try: out, err = conf.cmd_and_log(cmd, output=0) except Errors.WafError: conf.fatal('Could not find pgi compiler %r' % cmd) if out: match = version_re(out) else: match = version_re(err) if not match: conf.fatal('Could not verify PGI signature') cmd = cc + ['-help=variable'] try: out, err = conf.cmd_and_log(cmd, output=0) except Errors.WafError: conf.fatal('Could not find pgi compiler %r' % cmd) version = re.findall(r'^COMPVER\s*=(.*)', out, re.M) if len(version) != 1: conf.fatal('Could not determine the compiler version') return version[0] def configure(conf): conf.find_pgi_compiler('CC', 'pgcc') conf.find_ar() conf.gcc_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() tdb-1.4.2/third_party/waf/waflib/extras/pgicxx.py0000660000000000000000000000061313444661622021767 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Antoine Dechaume 2011 """ Detect the PGI C++ compiler """ from waflib.Tools.compiler_cxx import cxx_compiler cxx_compiler['linux'].append('pgicxx') from waflib.extras import pgicc def configure(conf): conf.find_pgi_compiler('CXX', 'pgCC') conf.find_ar() conf.gxx_common_flags() conf.cxx_load_tools() conf.cxx_add_flags() conf.link_add_flags() tdb-1.4.2/third_party/waf/waflib/extras/proc.py0000660000000000000000000000327513444661622021437 0ustar rootroot00000000000000#! 
/usr/bin/env python # per rosengren 2011 from os import environ, path from waflib import TaskGen, Utils def options(opt): grp = opt.add_option_group('Oracle ProC Options') grp.add_option('--oracle_home', action='store', default=environ.get('PROC_ORACLE'), help='Path to Oracle installation home (has bin/lib)') grp.add_option('--tns_admin', action='store', default=environ.get('TNS_ADMIN'), help='Directory containing server list (TNS_NAMES.ORA)') grp.add_option('--connection', action='store', default='dummy-user/dummy-password@dummy-server', help='Format: user/password@server') def configure(cnf): env = cnf.env if not env.PROC_ORACLE: env.PROC_ORACLE = cnf.options.oracle_home if not env.PROC_TNS_ADMIN: env.PROC_TNS_ADMIN = cnf.options.tns_admin if not env.PROC_CONNECTION: env.PROC_CONNECTION = cnf.options.connection cnf.find_program('proc', var='PROC', path_list=env.PROC_ORACLE + path.sep + 'bin') def proc(tsk): env = tsk.env gen = tsk.generator inc_nodes = gen.to_incnodes(Utils.to_list(getattr(gen,'includes',[])) + env['INCLUDES']) cmd = ( [env.PROC] + ['SQLCHECK=SEMANTICS'] + (['SYS_INCLUDE=(' + ','.join(env.PROC_INCLUDES) + ')'] if env.PROC_INCLUDES else []) + ['INCLUDE=(' + ','.join( [i.bldpath() for i in inc_nodes] ) + ')'] + ['userid=' + env.PROC_CONNECTION] + ['INAME=' + tsk.inputs[0].bldpath()] + ['ONAME=' + tsk.outputs[0].bldpath()] ) exec_env = { 'ORACLE_HOME': env.PROC_ORACLE, 'LD_LIBRARY_PATH': env.PROC_ORACLE + path.sep + 'lib', } if env.PROC_TNS_ADMIN: exec_env['TNS_ADMIN'] = env.PROC_TNS_ADMIN return tsk.exec_command(cmd, env=exec_env) TaskGen.declare_chain( name = 'proc', rule = proc, ext_in = '.pc', ext_out = '.c', ) tdb-1.4.2/third_party/waf/waflib/extras/protoc.py0000660000000000000000000001533113527011455021771 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Philipp Bender, 2012 # Matt Clarkson, 2012 import re, os from waflib.Task import Task from waflib.TaskGen import extension from waflib import Errors, Context, Logs """ A simple tool to integrate protocol buffers into your build system. Example for C++: def configure(conf): conf.load('compiler_cxx cxx protoc') def build(bld): bld( features = 'cxx cxxprogram' source = 'main.cpp file1.proto proto/file2.proto', includes = '. proto', target = 'executable') Example for Python: def configure(conf): conf.load('python protoc') def build(bld): bld( features = 'py' source = 'main.py file1.proto proto/file2.proto', protoc_includes = 'proto') Example for both Python and C++ at same time: def configure(conf): conf.load('cxx python protoc') def build(bld): bld( features = 'cxx py' source = 'file1.proto proto/file2.proto', protoc_includes = 'proto') # or includes Example for Java: def options(opt): opt.load('java') def configure(conf): conf.load('python java protoc') # Here you have to point to your protobuf-java JAR and have it in classpath conf.env.CLASSPATH_PROTOBUF = ['protobuf-java-2.5.0.jar'] def build(bld): bld( features = 'javac protoc', name = 'pbjava', srcdir = 'inc/ src', # directories used by javac source = ['inc/message_inc.proto', 'inc/message.proto'], # source is used by protoc for .proto files use = 'PROTOBUF', protoc_includes = ['inc']) # for protoc to search dependencies Protoc includes passed via protoc_includes are either relative to the taskgen or to the project and are searched in this order. 
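As an illustrative sketch (hypothetical file names, not from the original examples), a C++ program whose ``src/app.proto`` imports files from a sibling ``proto/`` directory could be declared as::

	def build(bld):
		bld(
			features = 'cxx cxxprogram',
			source = 'src/app.proto main.cpp',
			includes = '. src',
			protoc_includes = 'proto', # searched for imported .proto files
			target = 'app')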
Include directories external to the waf project can also be passed to the extra by using protoc_extincludes protoc_extincludes = ['/usr/include/pblib'] Notes when using this tool: - protoc command line parsing is tricky. The generated files can be put in subfolders which depend on the order of the include paths. Try to be simple when creating task generators containing protoc stuff. """ class protoc(Task): run_str = '${PROTOC} ${PROTOC_FL:PROTOC_FLAGS} ${PROTOC_ST:INCPATHS} ${PROTOC_ST:PROTOC_INCPATHS} ${PROTOC_ST:PROTOC_EXTINCPATHS} ${SRC[0].bldpath()}' color = 'BLUE' ext_out = ['.h', 'pb.cc', '.py', '.java'] def scan(self): """ Scan .proto dependencies """ node = self.inputs[0] nodes = [] names = [] seen = [] search_nodes = [] if not node: return (nodes, names) if 'cxx' in self.generator.features: search_nodes = self.generator.includes_nodes if 'py' in self.generator.features or 'javac' in self.generator.features: for incpath in getattr(self.generator, 'protoc_includes', []): incpath_node = self.generator.path.find_node(incpath) if incpath_node: search_nodes.append(incpath_node) else: # Check if relative to top-level for extra tg dependencies incpath_node = self.generator.bld.path.find_node(incpath) if incpath_node: search_nodes.append(incpath_node) else: raise Errors.WafError('protoc: include path %r does not exist' % incpath) def parse_node(node): if node in seen: return seen.append(node) code = node.read().splitlines() for line in code: m = re.search(r'^import\s+"(.*)";.*(//)?.*', line) if m: dep = m.groups()[0] for incnode in search_nodes: found = incnode.find_resource(dep) if found: nodes.append(found) parse_node(found) else: names.append(dep) parse_node(node) # Add also dependencies path to INCPATHS so protoc will find the included file for deppath in nodes: self.env.append_unique('INCPATHS', deppath.parent.bldpath()) return (nodes, names) @extension('.proto') def process_protoc(self, node): incdirs = [] out_nodes = [] protoc_flags = [] # ensure PROTOC_FLAGS is a list; a copy is used below anyway self.env.PROTOC_FLAGS = self.to_list(self.env.PROTOC_FLAGS) if 'cxx' in self.features: cpp_node = node.change_ext('.pb.cc') hpp_node = node.change_ext('.pb.h') self.source.append(cpp_node) out_nodes.append(cpp_node) out_nodes.append(hpp_node) protoc_flags.append('--cpp_out=%s' % node.parent.get_bld().bldpath()) if 'py' in self.features: py_node = node.change_ext('_pb2.py') self.source.append(py_node) out_nodes.append(py_node) protoc_flags.append('--python_out=%s' % node.parent.get_bld().bldpath()) if 'javac' in self.features: # Make javac get also pick java code generated in build if not node.parent.get_bld() in self.javac_task.srcdir: self.javac_task.srcdir.append(node.parent.get_bld()) protoc_flags.append('--java_out=%s' % node.parent.get_bld().bldpath()) node.parent.get_bld().mkdir() tsk = self.create_task('protoc', node, out_nodes) tsk.env.append_value('PROTOC_FLAGS', protoc_flags) if 'javac' in self.features: self.javac_task.set_run_after(tsk) # Instruct protoc where to search for .proto included files. 
# For C++ standard include files dirs are used, # but this doesn't apply to Python for example for incpath in getattr(self, 'protoc_includes', []): incpath_node = self.path.find_node(incpath) if incpath_node: incdirs.append(incpath_node.bldpath()) else: # Check if relative to top-level for extra tg dependencies incpath_node = self.bld.path.find_node(incpath) if incpath_node: incdirs.append(incpath_node.bldpath()) else: raise Errors.WafError('protoc: include path %r does not exist' % incpath) tsk.env.PROTOC_INCPATHS = incdirs # Include paths external to the waf project (ie. shared pb repositories) tsk.env.PROTOC_EXTINCPATHS = getattr(self, 'protoc_extincludes', []) # PR2115: protoc generates output of .proto files in nested # directories by canonicalizing paths. To avoid this we have to pass # as first include the full directory file of the .proto file tsk.env.prepend_value('INCPATHS', node.parent.bldpath()) use = getattr(self, 'use', '') if not 'PROTOBUF' in use: self.use = self.to_list(use) + ['PROTOBUF'] def configure(conf): conf.check_cfg(package='protobuf', uselib_store='PROTOBUF', args=['--cflags', '--libs']) conf.find_program('protoc', var='PROTOC') conf.start_msg('Checking for protoc version') protocver = conf.cmd_and_log(conf.env.PROTOC + ['--version'], output=Context.BOTH) protocver = ''.join(protocver).strip()[protocver[0].rfind(' ')+1:] conf.end_msg(protocver) conf.env.PROTOC_MAJOR = protocver[:protocver.find('.')] conf.env.PROTOC_ST = '-I%s' conf.env.PROTOC_FL = '%s' tdb-1.4.2/third_party/waf/waflib/extras/pyqt5.py0000660000000000000000000001610713527011455021547 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Federico Pellegrin, 2016-2019 (fedepell) adapted for Python """ This tool helps with finding Python Qt5 tools and libraries, and provides translation from QT5 files to Python code. The following snippet illustrates the tool usage:: def options(opt): opt.load('py pyqt5') def configure(conf): conf.load('py pyqt5') def build(bld): bld( features = 'py pyqt5', source = 'main.py textures.qrc aboutDialog.ui', ) Here, the UI description and resource files will be processed to generate code. Usage ===== Load the "pyqt5" tool. 
Add into the sources list also the qrc resources files or ui5 definition files and they will be translated into python code with the system tools (PyQt5, PySide2, PyQt4 are searched in this order) and then compiled """ try: from xml.sax import make_parser from xml.sax.handler import ContentHandler except ImportError: has_xml = False ContentHandler = object else: has_xml = True import os from waflib.Tools import python from waflib import Task, Options from waflib.TaskGen import feature, extension from waflib.Configure import conf from waflib import Logs EXT_RCC = ['.qrc'] """ File extension for the resource (.qrc) files """ EXT_UI = ['.ui'] """ File extension for the user interface (.ui) files """ class XMLHandler(ContentHandler): """ Parses ``.qrc`` files """ def __init__(self): self.buf = [] self.files = [] def startElement(self, name, attrs): if name == 'file': self.buf = [] def endElement(self, name): if name == 'file': self.files.append(str(''.join(self.buf))) def characters(self, cars): self.buf.append(cars) @extension(*EXT_RCC) def create_pyrcc_task(self, node): "Creates rcc and py task for ``.qrc`` files" rcnode = node.change_ext('.py') self.create_task('pyrcc', node, rcnode) if getattr(self, 'install_from', None): self.install_from = self.install_from.get_bld() else: self.install_from = self.path.get_bld() self.install_path = getattr(self, 'install_path', '${PYTHONDIR}') self.process_py(rcnode) @extension(*EXT_UI) def create_pyuic_task(self, node): "Create uic tasks and py for user interface ``.ui`` definition files" uinode = node.change_ext('.py') self.create_task('ui5py', node, uinode) if getattr(self, 'install_from', None): self.install_from = self.install_from.get_bld() else: self.install_from = self.path.get_bld() self.install_path = getattr(self, 'install_path', '${PYTHONDIR}') self.process_py(uinode) @extension('.ts') def add_pylang(self, node): """Adds all the .ts file into ``self.lang``""" self.lang = self.to_list(getattr(self, 'lang', [])) + [node] @feature('pyqt5') def apply_pyqt5(self): """ The additional parameters are: :param lang: list of translation files (\\*.ts) to process :type lang: list of :py:class:`waflib.Node.Node` or string without the .ts extension :param langname: if given, transform the \\*.ts files into a .qrc files to include in the binary file :type langname: :py:class:`waflib.Node.Node` or string without the .qrc extension """ if getattr(self, 'lang', None): qmtasks = [] for x in self.to_list(self.lang): if isinstance(x, str): x = self.path.find_resource(x + '.ts') qmtasks.append(self.create_task('ts2qm', x, x.change_ext('.qm'))) if getattr(self, 'langname', None): qmnodes = [k.outputs[0] for k in qmtasks] rcnode = self.langname if isinstance(rcnode, str): rcnode = self.path.find_or_declare(rcnode + '.qrc') t = self.create_task('qm2rcc', qmnodes, rcnode) create_pyrcc_task(self, t.outputs[0]) class pyrcc(Task.Task): """ Processes ``.qrc`` files """ color = 'BLUE' run_str = '${QT_PYRCC} ${SRC} -o ${TGT}' ext_out = ['.py'] def rcname(self): return os.path.splitext(self.inputs[0].name)[0] def scan(self): """Parse the *.qrc* files""" if not has_xml: Logs.error('No xml.sax support was found, rcc dependencies will be incomplete!') return ([], []) parser = make_parser() curHandler = XMLHandler() parser.setContentHandler(curHandler) fi = open(self.inputs[0].abspath(), 'r') try: parser.parse(fi) finally: fi.close() nodes = [] names = [] root = self.inputs[0].parent for x in curHandler.files: nd = root.find_resource(x) if nd: nodes.append(nd) else: 
names.append(x) return (nodes, names) class ui5py(Task.Task): """ Processes ``.ui`` files for python """ color = 'BLUE' run_str = '${QT_PYUIC} ${SRC} -o ${TGT}' ext_out = ['.py'] class ts2qm(Task.Task): """ Generates ``.qm`` files from ``.ts`` files """ color = 'BLUE' run_str = '${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}' class qm2rcc(Task.Task): """ Generates ``.qrc`` files from ``.qm`` files """ color = 'BLUE' after = 'ts2qm' def run(self): """Create a qrc file including the inputs""" txt = '\n'.join(['<file>%s</file>' % k.path_from(self.outputs[0].parent) for k in self.inputs]) code = '<!DOCTYPE RCC><RCC version="1.0">\n<qresource>\n%s\n</qresource>\n</RCC>' % txt self.outputs[0].write(code) def configure(self): self.find_pyqt5_binaries() # warn about this during the configuration too if not has_xml: Logs.error('No xml.sax support was found, rcc dependencies will be incomplete!') @conf def find_pyqt5_binaries(self): """ Detects PyQt5 or PySide2 programs such as pyuic5/pyside2-uic, pyrcc5/pyside2-rcc """ env = self.env if getattr(Options.options, 'want_pyqt5', True): self.find_program(['pyuic5'], var='QT_PYUIC') self.find_program(['pyrcc5'], var='QT_PYRCC') self.find_program(['pylupdate5'], var='QT_PYLUPDATE') elif getattr(Options.options, 'want_pyside2', True): self.find_program(['pyside2-uic'], var='QT_PYUIC') self.find_program(['pyside2-rcc'], var='QT_PYRCC') self.find_program(['pyside2-lupdate'], var='QT_PYLUPDATE') elif getattr(Options.options, 'want_pyqt4', True): self.find_program(['pyuic4'], var='QT_PYUIC') self.find_program(['pyrcc4'], var='QT_PYRCC') self.find_program(['pylupdate4'], var='QT_PYLUPDATE') else: self.find_program(['pyuic5','pyside2-uic','pyuic4'], var='QT_PYUIC') self.find_program(['pyrcc5','pyside2-rcc','pyrcc4'], var='QT_PYRCC') self.find_program(['pylupdate5', 'pyside2-lupdate','pylupdate4'], var='QT_PYLUPDATE') if not env.QT_PYUIC: self.fatal('cannot find the uic compiler for python for qt5') if not env.QT_PYRCC: self.fatal('cannot find the rcc compiler for python for qt5') self.find_program(['lrelease-qt5', 'lrelease'], var='QT_LRELEASE') def options(opt): """ Command-line options """ pyqt5opt=opt.add_option_group("Python QT5 Options") pyqt5opt.add_option('--pyqt5-pyqt5', action='store_true', default=False, dest='want_pyqt5', help='use PyQt5 bindings as python QT5 bindings (default PyQt5 is searched first, PySide2 after, PyQt4 last)') pyqt5opt.add_option('--pyqt5-pyside2', action='store_true', default=False, dest='want_pyside2', help='use PySide2 bindings as python QT5 bindings (default PyQt5 is searched first, PySide2 after, PyQt4 last)') pyqt5opt.add_option('--pyqt5-pyqt4', action='store_true', default=False, dest='want_pyqt4', help='use PyQt4 bindings as python QT5 bindings (default PyQt5 is searched first, PySide2 after, PyQt4 last)') tdb-1.4.2/third_party/waf/waflib/extras/pytest.py0000660000000000000000000002014113444661622022013 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Calle Rosenquist, 2016-2018 (xbreak) """ Provides Python unit test support using :py:class:`waflib.Tools.waf_unit_test.utest` task via the **pytest** feature. To use pytest the following is needed: 1. Load `pytest` and the dependency `waf_unit_test` tools. 2. Create a task generator with feature `pytest` (not `test`) and customize behaviour with the following attributes: - `pytest_source`: Test input files. - `ut_str`: Test runner command, e.g. ``${PYTHON} -B -m unittest discover`` or if nose is used: ``${NOSETESTS} --no-byte-compile ${SRC}``. - `ut_shell`: Determines if ``ut_str`` is executed in a shell.
Default: False. - `ut_cwd`: Working directory for test runner. Defaults to directory of first ``pytest_source`` file. Additionally the following `pytest` specific attributes are used in dependent taskgens: - `pytest_path`: Node or string list of additional Python paths. - `pytest_libpath`: Node or string list of additional library paths. The `use` dependencies are used for both update calculation and to populate the following environment variables for the `pytest` test runner: 1. `PYTHONPATH` (`sys.path`) of any dependent taskgen that has the feature `py`: - `install_from` attribute is used to determine where the root of the Python sources are located. If `install_from` is not specified the default is to use the taskgen path as the root. - `pytest_path` attribute is used to manually specify additional Python paths. 2. Dynamic linker search path variable (e.g. `LD_LIBRARY_PATH`) of any dependent taskgen with non-static link_task. - `pytest_libpath` attribute is used to manually specify additional linker paths. Note: `pytest` cannot automatically determine the correct `PYTHONPATH` for `pyext` taskgens because the extension might be part of a Python package or used standalone: - When used as part of another `py` package, the `PYTHONPATH` is provided by that taskgen so no additional action is required. - When used as a standalone module, the user needs to specify the `PYTHONPATH` explicitly via the `pytest_path` attribute on the `pyext` taskgen. For details c.f. the pytest playground examples. For example:: # A standalone Python C extension that demonstrates unit test environment population # of PYTHONPATH and LD_LIBRARY_PATH/PATH/DYLD_LIBRARY_PATH. # # Note: `pytest_path` is provided here because pytest cannot automatically determine # if the extension is part of another Python package or is used standalone. bld(name = 'foo_ext', features = 'c cshlib pyext', source = 'src/foo_ext.c', target = 'foo_ext', pytest_path = [ bld.path.get_bld() ]) # Python package under test that also depend on the Python module `foo_ext` # # Note: `install_from` is added automatically to `PYTHONPATH`. bld(name = 'foo', features = 'py', use = 'foo_ext', source = bld.path.ant_glob('src/foo/*.py'), install_from = 'src') # Unit test example using the built in module unittest and let that discover # any test cases. bld(name = 'foo_test', features = 'pytest', use = 'foo', pytest_source = bld.path.ant_glob('test/*.py'), ut_str = '${PYTHON} -B -m unittest discover') """ import os from waflib import Task, TaskGen, Errors, Utils, Logs from waflib.Tools import ccroot def _process_use_rec(self, name): """ Recursively process ``use`` for task generator with name ``name``.. Used by pytest_process_use. """ if name in self.pytest_use_not or name in self.pytest_use_seen: return try: tg = self.bld.get_tgen_by_name(name) except Errors.WafError: self.pytest_use_not.add(name) return self.pytest_use_seen.append(name) tg.post() for n in self.to_list(getattr(tg, 'use', [])): _process_use_rec(self, n) @TaskGen.feature('pytest') @TaskGen.after_method('process_source', 'apply_link') def pytest_process_use(self): """ Process the ``use`` attribute which contains a list of task generator names and store paths that later is used to populate the unit test runtime environment. 
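A rough picture of what is collected, assuming a hypothetical dependency chain ``foo_test -> foo (py) -> foo_ext (pyext shlib)``::

	foo     -> PYTHONPATH gains its install_from (or taskgen) directory
	foo_ext -> the library path gains the link task output directory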
""" self.pytest_use_not = set() self.pytest_use_seen = [] self.pytest_paths = [] # strings or Nodes self.pytest_libpaths = [] # strings or Nodes self.pytest_dep_nodes = [] names = self.to_list(getattr(self, 'use', [])) for name in names: _process_use_rec(self, name) def extend_unique(lst, varlst): ext = [] for x in varlst: if x not in lst: ext.append(x) lst.extend(ext) # Collect type specific info needed to construct a valid runtime environment # for the test. for name in self.pytest_use_seen: tg = self.bld.get_tgen_by_name(name) extend_unique(self.pytest_paths, Utils.to_list(getattr(tg, 'pytest_path', []))) extend_unique(self.pytest_libpaths, Utils.to_list(getattr(tg, 'pytest_libpath', []))) if 'py' in tg.features: # Python dependencies are added to PYTHONPATH pypath = getattr(tg, 'install_from', tg.path) if 'buildcopy' in tg.features: # Since buildcopy is used we assume that PYTHONPATH in build should be used, # not source extend_unique(self.pytest_paths, [pypath.get_bld().abspath()]) # Add buildcopy output nodes to dependencies extend_unique(self.pytest_dep_nodes, [o for task in getattr(tg, 'tasks', []) \ for o in getattr(task, 'outputs', [])]) else: # If buildcopy is not used, depend on sources instead extend_unique(self.pytest_dep_nodes, tg.source) extend_unique(self.pytest_paths, [pypath.abspath()]) if getattr(tg, 'link_task', None): # For tasks with a link_task (C, C++, D et.c.) include their library paths: if not isinstance(tg.link_task, ccroot.stlink_task): extend_unique(self.pytest_dep_nodes, tg.link_task.outputs) extend_unique(self.pytest_libpaths, tg.link_task.env.LIBPATH) if 'pyext' in tg.features: # If the taskgen is extending Python we also want to add the interpreter libpath. extend_unique(self.pytest_libpaths, tg.link_task.env.LIBPATH_PYEXT) else: # Only add to libpath if the link task is not a Python extension extend_unique(self.pytest_libpaths, [tg.link_task.outputs[0].parent.abspath()]) @TaskGen.feature('pytest') @TaskGen.after_method('pytest_process_use') def make_pytest(self): """ Creates a ``utest`` task with a populated environment for Python if not specified in ``ut_env``: - Paths in `pytest_paths` attribute are used to populate PYTHONPATH - Paths in `pytest_libpaths` attribute are used to populate the system library path (e.g. 
LD_LIBRARY_PATH) """ nodes = self.to_nodes(self.pytest_source) tsk = self.create_task('utest', nodes) tsk.dep_nodes.extend(self.pytest_dep_nodes) if getattr(self, 'ut_str', None): self.ut_run, lst = Task.compile_fun(self.ut_str, shell=getattr(self, 'ut_shell', False)) tsk.vars = lst + tsk.vars if getattr(self, 'ut_cwd', None): if isinstance(self.ut_cwd, str): # we want a Node instance if os.path.isabs(self.ut_cwd): self.ut_cwd = self.bld.root.make_node(self.ut_cwd) else: self.ut_cwd = self.path.make_node(self.ut_cwd) else: if tsk.inputs: self.ut_cwd = tsk.inputs[0].parent else: raise Errors.WafError("no valid input files for pytest task, check pytest_source value") if not self.ut_cwd.exists(): self.ut_cwd.mkdir() if not hasattr(self, 'ut_env'): self.ut_env = dict(os.environ) def add_paths(var, lst): # Add list of paths to a variable, lst can contain strings or nodes lst = [ str(n) for n in lst ] Logs.debug("ut: %s: Adding paths %s=%s", self, var, lst) self.ut_env[var] = os.pathsep.join(lst) + os.pathsep + self.ut_env.get(var, '') # Prepend dependency paths to PYTHONPATH and LD_LIBRARY_PATH add_paths('PYTHONPATH', self.pytest_paths) if Utils.is_win32: add_paths('PATH', self.pytest_libpaths) elif Utils.unversioned_sys_platform() == 'darwin': add_paths('DYLD_LIBRARY_PATH', self.pytest_libpaths) add_paths('LD_LIBRARY_PATH', self.pytest_libpaths) else: add_paths('LD_LIBRARY_PATH', self.pytest_libpaths) tdb-1.4.2/third_party/waf/waflib/extras/qnxnto.py0000660000000000000000000000356113444661622022021 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Jérôme Carretero 2011 (zougloub) # QNX neutrino compatibility functions import sys, os from waflib import Utils class Popen(object): """ Popen cannot work on QNX from a threaded program: Forking in threads is not implemented in neutrino. Python's os.popen / spawn / fork won't work when running in threads (they will if in the main program thread) In waf, this happens mostly in build. And the use cases can be replaced by os.system() calls. """ __slots__ = ["prog", "kw", "popen", "verbose"] verbose = 0 def __init__(self, prog, **kw): try: self.prog = prog self.kw = kw self.popen = None if Popen.verbose: sys.stdout.write("Popen created: %r, kw=%r..." % (prog, kw)) do_delegate = kw.get('stdout') == -1 and kw.get('stderr') == -1 if do_delegate: if Popen.verbose: print("Delegating to real Popen") self.popen = self.real_Popen(prog, **kw) else: if Popen.verbose: print("Emulating") except Exception as e: if Popen.verbose: print("Exception: %s" % e) raise def __getattr__(self, name): if Popen.verbose: sys.stdout.write("Getattr: %s..." 
% name) if name in Popen.__slots__: return object.__getattribute__(self, name) else: if self.popen is not None: if Popen.verbose: print("from Popen") return getattr(self.popen, name) else: if name == "wait": return self.emu_wait else: raise Exception("subprocess emulation: not implemented: %s" % name) def emu_wait(self): if Popen.verbose: print("emulated wait (%r kw=%r)" % (self.prog, self.kw)) if isinstance(self.prog, str): cmd = self.prog else: cmd = " ".join(self.prog) if 'cwd' in self.kw: cmd = 'cd "%s" && %s' % (self.kw['cwd'], cmd) return os.system(cmd) if sys.platform == "qnx6": Popen.real_Popen = Utils.subprocess.Popen Utils.subprocess.Popen = Popen tdb-1.4.2/third_party/waf/waflib/extras/qt4.py0000660000000000000000000004752213527011455021200 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2010 (ita) """ Tool Description ================ This tool helps with finding Qt4 tools and libraries, and also provides syntactic sugar for using Qt4 tools. The following snippet illustrates the tool usage:: def options(opt): opt.load('compiler_cxx qt4') def configure(conf): conf.load('compiler_cxx qt4') def build(bld): bld( features = 'qt4 cxx cxxprogram', uselib = 'QTCORE QTGUI QTOPENGL QTSVG', source = 'main.cpp textures.qrc aboutDialog.ui', target = 'window', ) Here, the UI description and resource files will be processed to generate code. Usage ===== Load the "qt4" tool. You also need to edit your sources accordingly: - the normal way of doing things is to have your C++ files include the .moc file. This is regarded as the best practice (and provides much faster compilations). It also implies that the include paths have been set properly. - to have the include paths added automatically, use the following:: from waflib.TaskGen import feature, before_method, after_method @feature('cxx') @after_method('process_source') @before_method('apply_incpaths') def add_includes_paths(self): incs = set(self.to_list(getattr(self, 'includes', ''))) for x in self.compiled_tasks: incs.add(x.inputs[0].parent.path_from(self.path)) self.includes = sorted(incs) Note: another tool provides Qt processing that does not require .moc includes, see 'playground/slow_qt/'. A few options (--qt{dir,bin,...}) and environment variables (QT4_{ROOT,DIR,MOC,UIC,XCOMPILE}) allow finer tuning of the tool, tool path selection, etc; please read the source for more info. """ try: from xml.sax import make_parser from xml.sax.handler import ContentHandler except ImportError: has_xml = False ContentHandler = object else: has_xml = True import os, sys from waflib.Tools import cxx from waflib import Task, Utils, Options, Errors, Context from waflib.TaskGen import feature, after_method, extension from waflib.Configure import conf from waflib import Logs MOC_H = ['.h', '.hpp', '.hxx', '.hh'] """ File extensions associated to the .moc files """ EXT_RCC = ['.qrc'] """ File extension for the resource (.qrc) files """ EXT_UI = ['.ui'] """ File extension for the user interface (.ui) files """ EXT_QT4 = ['.cpp', '.cc', '.cxx', '.C'] """ File extensions of C++ files that may require a .moc processing """ QT4_LIBS = "QtCore QtGui QtUiTools QtNetwork QtOpenGL QtSql QtSvg QtTest QtXml QtXmlPatterns QtWebKit Qt3Support QtHelp QtScript QtDeclarative QtDesigner" class qxx(Task.classes['cxx']): """ Each C++ file can have zero or several .moc files to create.
They are known only when the files are scanned (preprocessor) To avoid scanning the c++ files each time (parsing C/C++), the results are retrieved from the task cache (bld.node_deps/bld.raw_deps). The moc tasks are also created *dynamically* during the build. """ def __init__(self, *k, **kw): Task.Task.__init__(self, *k, **kw) self.moc_done = 0 def runnable_status(self): """ Compute the task signature to make sure the scanner was executed. Create the moc tasks by using :py:meth:`waflib.Tools.qt4.qxx.add_moc_tasks` (if necessary), then postpone the task execution (there is no need to recompute the task signature). """ if self.moc_done: return Task.Task.runnable_status(self) else: for t in self.run_after: if not t.hasrun: return Task.ASK_LATER self.add_moc_tasks() return Task.Task.runnable_status(self) def create_moc_task(self, h_node, m_node): """ If several libraries use the same classes, it is possible that moc will run several times (Issue 1318) It is not possible to change the file names, but we can assume that the moc transformation will be identical, and the moc tasks can be shared in a global cache. The defines passed to moc will then depend on task generator order. If this is not acceptable, then use the tool slow_qt4 instead (and enjoy the slow builds... :-( ) """ try: moc_cache = self.generator.bld.moc_cache except AttributeError: moc_cache = self.generator.bld.moc_cache = {} try: return moc_cache[h_node] except KeyError: tsk = moc_cache[h_node] = Task.classes['moc'](env=self.env, generator=self.generator) tsk.set_inputs(h_node) tsk.set_outputs(m_node) if self.generator: self.generator.tasks.append(tsk) # direct injection in the build phase (safe because called from the main thread) gen = self.generator.bld.producer gen.outstanding.append(tsk) gen.total += 1 return tsk def moc_h_ext(self): ext = [] try: ext = Options.options.qt_header_ext.split() except AttributeError: pass if not ext: ext = MOC_H return ext def add_moc_tasks(self): """ Create the moc tasks by looking in ``bld.raw_deps[self.uid()]`` """ node = self.inputs[0] bld = self.generator.bld try: # compute the signature once to know if there is a moc file to create self.signature() except KeyError: # the moc file may be referenced somewhere else pass else: # remove the signature, it must be recomputed with the moc task delattr(self, 'cache_sig') include_nodes = [node.parent] + self.generator.includes_nodes moctasks = [] mocfiles = set() for d in bld.raw_deps.get(self.uid(), []): if not d.endswith('.moc'): continue # process that base.moc only once if d in mocfiles: continue mocfiles.add(d) # find the source associated with the moc file h_node = None base2 = d[:-4] for x in include_nodes: for e in self.moc_h_ext(): h_node = x.find_node(base2 + e) if h_node: break if h_node: m_node = h_node.change_ext('.moc') break else: # foo.cpp -> foo.cpp.moc for k in EXT_QT4: if base2.endswith(k): for x in include_nodes: h_node = x.find_node(base2) if h_node: break if h_node: m_node = h_node.change_ext(k + '.moc') break if not h_node: raise Errors.WafError('No source found for %r which is a moc file' % d) # create the moc task task = self.create_moc_task(h_node, m_node) moctasks.append(task) # simple scheduler dependency: run the moc task before others self.run_after.update(set(moctasks)) self.moc_done = 1 class trans_update(Task.Task): """Update a .ts files from a list of C++ files""" run_str = '${QT_LUPDATE} ${SRC} -ts ${TGT}' color = 'BLUE' class XMLHandler(ContentHandler): """ Parser for *.qrc* files """ def __init__(self): self.buf 
= [] self.files = [] def startElement(self, name, attrs): if name == 'file': self.buf = [] def endElement(self, name): if name == 'file': self.files.append(str(''.join(self.buf))) def characters(self, cars): self.buf.append(cars) @extension(*EXT_RCC) def create_rcc_task(self, node): "Create rcc and cxx tasks for *.qrc* files" rcnode = node.change_ext('_rc.cpp') self.create_task('rcc', node, rcnode) cpptask = self.create_task('cxx', rcnode, rcnode.change_ext('.o')) try: self.compiled_tasks.append(cpptask) except AttributeError: self.compiled_tasks = [cpptask] return cpptask @extension(*EXT_UI) def create_uic_task(self, node): "hook for uic tasks" uictask = self.create_task('ui4', node) uictask.outputs = [self.path.find_or_declare(self.env['ui_PATTERN'] % node.name[:-3])] @extension('.ts') def add_lang(self, node): """add all the .ts file into self.lang""" self.lang = self.to_list(getattr(self, 'lang', [])) + [node] @feature('qt4') @after_method('apply_link') def apply_qt4(self): """ Add MOC_FLAGS which may be necessary for moc:: def build(bld): bld.program(features='qt4', source='main.cpp', target='app', use='QTCORE') The additional parameters are: :param lang: list of translation files (\\*.ts) to process :type lang: list of :py:class:`waflib.Node.Node` or string without the .ts extension :param update: whether to process the C++ files to update the \\*.ts files (use **waf --translate**) :type update: bool :param langname: if given, transform the \\*.ts files into a .qrc files to include in the binary file :type langname: :py:class:`waflib.Node.Node` or string without the .qrc extension """ if getattr(self, 'lang', None): qmtasks = [] for x in self.to_list(self.lang): if isinstance(x, str): x = self.path.find_resource(x + '.ts') qmtasks.append(self.create_task('ts2qm', x, x.change_ext('.qm'))) if getattr(self, 'update', None) and Options.options.trans_qt4: cxxnodes = [a.inputs[0] for a in self.compiled_tasks] + [ a.inputs[0] for a in self.tasks if getattr(a, 'inputs', None) and a.inputs[0].name.endswith('.ui')] for x in qmtasks: self.create_task('trans_update', cxxnodes, x.inputs) if getattr(self, 'langname', None): qmnodes = [x.outputs[0] for x in qmtasks] rcnode = self.langname if isinstance(rcnode, str): rcnode = self.path.find_or_declare(rcnode + '.qrc') t = self.create_task('qm2rcc', qmnodes, rcnode) k = create_rcc_task(self, t.outputs[0]) self.link_task.inputs.append(k.outputs[0]) lst = [] for flag in self.to_list(self.env['CXXFLAGS']): if len(flag) < 2: continue f = flag[0:2] if f in ('-D', '-I', '/D', '/I'): if (f[0] == '/'): lst.append('-' + flag[1:]) else: lst.append(flag) self.env.append_value('MOC_FLAGS', lst) @extension(*EXT_QT4) def cxx_hook(self, node): """ Re-map C++ file extensions to the :py:class:`waflib.Tools.qt4.qxx` task. 
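Sources compiled through this hook are expected to include their generated moc files themselves, for instance with a trailing ``#include "window.moc"`` in a hypothetical ``window.cpp``, as explained in the module docstring above.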
""" return self.create_compiled_task('qxx', node) class rcc(Task.Task): """ Process *.qrc* files """ color = 'BLUE' run_str = '${QT_RCC} -name ${tsk.rcname()} ${SRC[0].abspath()} ${RCC_ST} -o ${TGT}' ext_out = ['.h'] def rcname(self): return os.path.splitext(self.inputs[0].name)[0] def scan(self): """Parse the *.qrc* files""" if not has_xml: Logs.error('no xml support was found, the rcc dependencies will be incomplete!') return ([], []) parser = make_parser() curHandler = XMLHandler() parser.setContentHandler(curHandler) fi = open(self.inputs[0].abspath(), 'r') try: parser.parse(fi) finally: fi.close() nodes = [] names = [] root = self.inputs[0].parent for x in curHandler.files: nd = root.find_resource(x) if nd: nodes.append(nd) else: names.append(x) return (nodes, names) class moc(Task.Task): """ Create *.moc* files """ color = 'BLUE' run_str = '${QT_MOC} ${MOC_FLAGS} ${MOCCPPPATH_ST:INCPATHS} ${MOCDEFINES_ST:DEFINES} ${SRC} ${MOC_ST} ${TGT}' def keyword(self): return "Creating" def __str__(self): return self.outputs[0].path_from(self.generator.bld.launch_node()) class ui4(Task.Task): """ Process *.ui* files """ color = 'BLUE' run_str = '${QT_UIC} ${SRC} -o ${TGT}' ext_out = ['.h'] class ts2qm(Task.Task): """ Create *.qm* files from *.ts* files """ color = 'BLUE' run_str = '${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}' class qm2rcc(Task.Task): """ Transform *.qm* files into *.rc* files """ color = 'BLUE' after = 'ts2qm' def run(self): """Create a qrc file including the inputs""" txt = '\n'.join(['%s' % k.path_from(self.outputs[0].parent) for k in self.inputs]) code = '\n\n%s\n\n' % txt self.outputs[0].write(code) def configure(self): """ Besides the configuration options, the environment variable QT4_ROOT may be used to give the location of the qt4 libraries (absolute path). 
The detection will use the program *pkg-config* through :py:func:`waflib.Tools.config_c.check_cfg` """ self.find_qt4_binaries() self.set_qt4_libs_to_check() self.set_qt4_defines() self.find_qt4_libraries() self.add_qt4_rpath() self.simplify_qt4_libs() @conf def find_qt4_binaries(self): env = self.env opt = Options.options qtdir = getattr(opt, 'qtdir', '') qtbin = getattr(opt, 'qtbin', '') paths = [] if qtdir: qtbin = os.path.join(qtdir, 'bin') # the qt directory has been given from QT4_ROOT - deduce the qt binary path if not qtdir: qtdir = os.environ.get('QT4_ROOT', '') qtbin = os.environ.get('QT4_BIN') or os.path.join(qtdir, 'bin') if qtbin: paths = [qtbin] # no qtdir, look in the path and in /usr/local/Trolltech if not qtdir: paths = os.environ.get('PATH', '').split(os.pathsep) paths.append('/usr/share/qt4/bin/') try: lst = Utils.listdir('/usr/local/Trolltech/') except OSError: pass else: if lst: lst.sort() lst.reverse() # keep the highest version qtdir = '/usr/local/Trolltech/%s/' % lst[0] qtbin = os.path.join(qtdir, 'bin') paths.append(qtbin) # at the end, try to find qmake in the paths given # keep the one with the highest version cand = None prev_ver = ['4', '0', '0'] for qmk in ('qmake-qt4', 'qmake4', 'qmake'): try: qmake = self.find_program(qmk, path_list=paths) except self.errors.ConfigurationError: pass else: try: version = self.cmd_and_log(qmake + ['-query', 'QT_VERSION']).strip() except self.errors.WafError: pass else: if version: new_ver = version.split('.') if new_ver > prev_ver: cand = qmake prev_ver = new_ver if cand: self.env.QMAKE = cand else: self.fatal('Could not find qmake for qt4') qtbin = self.cmd_and_log(self.env.QMAKE + ['-query', 'QT_INSTALL_BINS']).strip() + os.sep def find_bin(lst, var): if var in env: return for f in lst: try: ret = self.find_program(f, path_list=paths) except self.errors.ConfigurationError: pass else: env[var]=ret break find_bin(['uic-qt3', 'uic3'], 'QT_UIC3') find_bin(['uic-qt4', 'uic'], 'QT_UIC') if not env.QT_UIC: self.fatal('cannot find the uic compiler for qt4') self.start_msg('Checking for uic version') uicver = self.cmd_and_log(env.QT_UIC + ["-version"], output=Context.BOTH) uicver = ''.join(uicver).strip() uicver = uicver.replace('Qt User Interface Compiler ','').replace('User Interface Compiler for Qt', '') self.end_msg(uicver) if uicver.find(' 3.') != -1: self.fatal('this uic compiler is for qt3, add uic for qt4 to your path') find_bin(['moc-qt4', 'moc'], 'QT_MOC') find_bin(['rcc-qt4', 'rcc'], 'QT_RCC') find_bin(['lrelease-qt4', 'lrelease'], 'QT_LRELEASE') find_bin(['lupdate-qt4', 'lupdate'], 'QT_LUPDATE') env['UIC3_ST']= '%s -o %s' env['UIC_ST'] = '%s -o %s' env['MOC_ST'] = '-o' env['ui_PATTERN'] = 'ui_%s.h' env['QT_LRELEASE_FLAGS'] = ['-silent'] env.MOCCPPPATH_ST = '-I%s' env.MOCDEFINES_ST = '-D%s' @conf def find_qt4_libraries(self): qtlibs = getattr(Options.options, 'qtlibs', None) or os.environ.get("QT4_LIBDIR") if not qtlibs: try: qtlibs = self.cmd_and_log(self.env.QMAKE + ['-query', 'QT_INSTALL_LIBS']).strip() except Errors.WafError: qtdir = self.cmd_and_log(self.env.QMAKE + ['-query', 'QT_INSTALL_PREFIX']).strip() + os.sep qtlibs = os.path.join(qtdir, 'lib') self.msg('Found the Qt4 libraries in', qtlibs) qtincludes = os.environ.get("QT4_INCLUDES") or self.cmd_and_log(self.env.QMAKE + ['-query', 'QT_INSTALL_HEADERS']).strip() env = self.env if not 'PKG_CONFIG_PATH' in os.environ: os.environ['PKG_CONFIG_PATH'] = '%s:%s/pkgconfig:/usr/lib/qt4/lib/pkgconfig:/opt/qt4/lib/pkgconfig:/usr/lib/qt4/lib:/opt/qt4/lib' % (qtlibs, qtlibs) 
try: if os.environ.get("QT4_XCOMPILE"): raise self.errors.ConfigurationError() self.check_cfg(atleast_pkgconfig_version='0.1') except self.errors.ConfigurationError: for i in self.qt4_vars: uselib = i.upper() if Utils.unversioned_sys_platform() == "darwin": # Since at least qt 4.7.3 each library locates in separate directory frameworkName = i + ".framework" qtDynamicLib = os.path.join(qtlibs, frameworkName, i) if os.path.exists(qtDynamicLib): env.append_unique('FRAMEWORK_' + uselib, i) self.msg('Checking for %s' % i, qtDynamicLib, 'GREEN') else: self.msg('Checking for %s' % i, False, 'YELLOW') env.append_unique('INCLUDES_' + uselib, os.path.join(qtlibs, frameworkName, 'Headers')) elif env.DEST_OS != "win32": qtDynamicLib = os.path.join(qtlibs, "lib" + i + ".so") qtStaticLib = os.path.join(qtlibs, "lib" + i + ".a") if os.path.exists(qtDynamicLib): env.append_unique('LIB_' + uselib, i) self.msg('Checking for %s' % i, qtDynamicLib, 'GREEN') elif os.path.exists(qtStaticLib): env.append_unique('LIB_' + uselib, i) self.msg('Checking for %s' % i, qtStaticLib, 'GREEN') else: self.msg('Checking for %s' % i, False, 'YELLOW') env.append_unique('LIBPATH_' + uselib, qtlibs) env.append_unique('INCLUDES_' + uselib, qtincludes) env.append_unique('INCLUDES_' + uselib, os.path.join(qtincludes, i)) else: # Release library names are like QtCore4 for k in ("lib%s.a", "lib%s4.a", "%s.lib", "%s4.lib"): lib = os.path.join(qtlibs, k % i) if os.path.exists(lib): env.append_unique('LIB_' + uselib, i + k[k.find("%s") + 2 : k.find('.')]) self.msg('Checking for %s' % i, lib, 'GREEN') break else: self.msg('Checking for %s' % i, False, 'YELLOW') env.append_unique('LIBPATH_' + uselib, qtlibs) env.append_unique('INCLUDES_' + uselib, qtincludes) env.append_unique('INCLUDES_' + uselib, os.path.join(qtincludes, i)) # Debug library names are like QtCore4d uselib = i.upper() + "_debug" for k in ("lib%sd.a", "lib%sd4.a", "%sd.lib", "%sd4.lib"): lib = os.path.join(qtlibs, k % i) if os.path.exists(lib): env.append_unique('LIB_' + uselib, i + k[k.find("%s") + 2 : k.find('.')]) self.msg('Checking for %s' % i, lib, 'GREEN') break else: self.msg('Checking for %s' % i, False, 'YELLOW') env.append_unique('LIBPATH_' + uselib, qtlibs) env.append_unique('INCLUDES_' + uselib, qtincludes) env.append_unique('INCLUDES_' + uselib, os.path.join(qtincludes, i)) else: for i in self.qt4_vars_debug + self.qt4_vars: self.check_cfg(package=i, args='--cflags --libs', mandatory=False) @conf def simplify_qt4_libs(self): # the libpaths make really long command-lines # remove the qtcore ones from qtgui, etc env = self.env def process_lib(vars_, coreval): for d in vars_: var = d.upper() if var == 'QTCORE': continue value = env['LIBPATH_'+var] if value: core = env[coreval] accu = [] for lib in value: if lib in core: continue accu.append(lib) env['LIBPATH_'+var] = accu process_lib(self.qt4_vars, 'LIBPATH_QTCORE') process_lib(self.qt4_vars_debug, 'LIBPATH_QTCORE_DEBUG') @conf def add_qt4_rpath(self): # rpath if wanted env = self.env if getattr(Options.options, 'want_rpath', False): def process_rpath(vars_, coreval): for d in vars_: var = d.upper() value = env['LIBPATH_'+var] if value: core = env[coreval] accu = [] for lib in value: if var != 'QTCORE': if lib in core: continue accu.append('-Wl,--rpath='+lib) env['RPATH_'+var] = accu process_rpath(self.qt4_vars, 'LIBPATH_QTCORE') process_rpath(self.qt4_vars_debug, 'LIBPATH_QTCORE_DEBUG') @conf def set_qt4_libs_to_check(self): if not hasattr(self, 'qt4_vars'): self.qt4_vars = QT4_LIBS self.qt4_vars = 
Utils.to_list(self.qt4_vars) if not hasattr(self, 'qt4_vars_debug'): self.qt4_vars_debug = [a + '_debug' for a in self.qt4_vars] self.qt4_vars_debug = Utils.to_list(self.qt4_vars_debug) @conf def set_qt4_defines(self): if sys.platform != 'win32': return for x in self.qt4_vars: y = x[2:].upper() self.env.append_unique('DEFINES_%s' % x.upper(), 'QT_%s_LIB' % y) self.env.append_unique('DEFINES_%s_DEBUG' % x.upper(), 'QT_%s_LIB' % y) def options(opt): """ Command-line options """ opt.add_option('--want-rpath', action='store_true', default=False, dest='want_rpath', help='enable the rpath for qt libraries') opt.add_option('--header-ext', type='string', default='', help='header extension for moc files', dest='qt_header_ext') for i in 'qtdir qtbin qtlibs'.split(): opt.add_option('--'+i, type='string', default='', dest=i) opt.add_option('--translate', action="store_true", help="collect translation strings", dest="trans_qt4", default=False) tdb-1.4.2/third_party/waf/waflib/extras/relocation.py0000660000000000000000000000433313444661622022627 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 """ Waf 1.6 Try to detect if the project directory was relocated, and if it was, change the node representing the project directory. Just call: waf configure build Note that if the project directory name changes, the signatures for the tasks using files in that directory will change, causing a partial build. """ import os from waflib import Build, ConfigSet, Task, Utils, Errors from waflib.TaskGen import feature, after_method EXTRA_LOCK = '.old_srcdir' old1 = Build.BuildContext.store def store(self): old1(self) db = os.path.join(self.variant_dir, EXTRA_LOCK) env = ConfigSet.ConfigSet() env.SRCDIR = self.srcnode.abspath() env.store(db) Build.BuildContext.store = store old2 = Build.BuildContext.init_dirs def init_dirs(self): if not (os.path.isabs(self.top_dir) and os.path.isabs(self.out_dir)): raise Errors.WafError('The project was not configured: run "waf configure" first!') srcdir = None db = os.path.join(self.variant_dir, EXTRA_LOCK) env = ConfigSet.ConfigSet() try: env.load(db) srcdir = env.SRCDIR except: pass if srcdir: d = self.root.find_node(srcdir) if d and srcdir != self.top_dir and getattr(d, 'children', ''): srcnode = self.root.make_node(self.top_dir) print("relocating the source directory %r -> %r" % (srcdir, self.top_dir)) srcnode.children = {} for (k, v) in d.children.items(): srcnode.children[k] = v v.parent = srcnode d.children = {} old2(self) Build.BuildContext.init_dirs = init_dirs def uid(self): try: return self.uid_ except AttributeError: # this is not a real hot zone, but we want to avoid surprises here m = Utils.md5() up = m.update up(self.__class__.__name__.encode()) for x in self.inputs + self.outputs: up(x.path_from(x.ctx.srcnode).encode()) self.uid_ = m.digest() return self.uid_ Task.Task.uid = uid @feature('c', 'cxx', 'd', 'go', 'asm', 'fc', 'includes') @after_method('propagate_uselib_vars', 'process_source') def apply_incpaths(self): lst = self.to_incnodes(self.to_list(getattr(self, 'includes', [])) + self.env['INCLUDES']) self.includes_nodes = lst bld = self.bld self.env['INCPATHS'] = [x.is_child_of(bld.srcnode) and x.path_from(bld.bldnode) or x.abspath() for x in lst] tdb-1.4.2/third_party/waf/waflib/extras/remote.py0000660000000000000000000002307213527011455021757 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Remote Builds tool using rsync+ssh __author__ = "Jérôme Carretero " __copyright__ = "Jérôme Carretero, 2013" """ Simple Remote Builds 
******************** This tool is an *experimental* tool (meaning, do not even try to pollute the waf bug tracker with bugs in here, contact me directly) providing simple remote builds. It uses rsync and ssh to perform the remote builds. It is intended for performing cross-compilation on platforms where a cross-compiler is either unavailable (eg. MacOS, QNX) a specific product does not exist (eg. Windows builds using Visual Studio) or simply not installed. This tool sends the sources and the waf script to the remote host, and commands the usual waf execution. There are alternatives to using this tool, such as setting up shared folders, logging on to remote machines, and building on the shared folders. Electing one method or another depends on the size of the program. Usage ===== 1. Set your wscript file so it includes a list of variants, e.g.:: from waflib import Utils top = '.' out = 'build' variants = [ 'linux_64_debug', 'linux_64_release', 'linux_32_debug', 'linux_32_release', ] from waflib.extras import remote def options(opt): # normal stuff from here on opt.load('compiler_c') def configure(conf): if not conf.variant: return # normal stuff from here on conf.load('compiler_c') def build(bld): if not bld.variant: return # normal stuff from here on bld(features='c cprogram', target='app', source='main.c') 2. Build the waf file, so it includes this tool, and put it in the current directory .. code:: bash ./waf-light --tools=remote 3. Set the host names to access the hosts: .. code:: bash export REMOTE_QNX=user@kiunix 4. Setup the ssh server and ssh keys The ssh key should not be protected by a password, or it will prompt for it every time. Create the key on the client: .. code:: bash ssh-keygen -t rsa -f foo.rsa Then copy foo.rsa.pub to the remote machine (user@kiunix:/home/user/.ssh/authorized_keys), and make sure the permissions are correct (chmod go-w ~ ~/.ssh ~/.ssh/authorized_keys) A separate key for the build processes can be set in the environment variable WAF_SSH_KEY. The tool will then use 'ssh-keyscan' to avoid prompting for remote hosts, so be warned to use this feature on internal networks only (MITM). .. code:: bash export WAF_SSH_KEY=~/foo.rsa 5. Perform the build: .. 
code:: bash waf configure_all build_all --remote """ import getpass, os, re, sys from collections import OrderedDict from waflib import Context, Options, Utils, ConfigSet from waflib.Build import BuildContext, CleanContext, InstallContext, UninstallContext from waflib.Configure import ConfigurationContext is_remote = False if '--remote' in sys.argv: is_remote = True sys.argv.remove('--remote') class init(Context.Context): """ Generates the *_all commands """ cmd = 'init' fun = 'init' def execute(self): for x in list(Context.g_module.variants): self.make_variant(x) lst = ['remote'] for k in Options.commands: if k.endswith('_all'): name = k.replace('_all', '') for x in Context.g_module.variants: lst.append('%s_%s' % (name, x)) else: lst.append(k) del Options.commands[:] Options.commands += lst def make_variant(self, x): for y in (BuildContext, CleanContext, InstallContext, UninstallContext): name = y.__name__.replace('Context','').lower() class tmp(y): cmd = name + '_' + x fun = 'build' variant = x class tmp(ConfigurationContext): cmd = 'configure_' + x fun = 'configure' variant = x def __init__(self, **kw): ConfigurationContext.__init__(self, **kw) self.setenv(x) class remote(BuildContext): cmd = 'remote' fun = 'build' def get_ssh_hosts(self): lst = [] for v in Context.g_module.variants: self.env.HOST = self.login_to_host(self.variant_to_login(v)) cmd = Utils.subst_vars('${SSH_KEYSCAN} -t rsa,ecdsa ${HOST}', self.env) out, err = self.cmd_and_log(cmd, output=Context.BOTH, quiet=Context.BOTH) lst.append(out.strip()) return lst def setup_private_ssh_key(self): """ When WAF_SSH_KEY points to a private key, a .ssh directory will be created in the build directory Make sure that the ssh key does not prompt for a password """ key = os.environ.get('WAF_SSH_KEY', '') if not key: return if not os.path.isfile(key): self.fatal('Key in WAF_SSH_KEY must point to a valid file') self.ssh_dir = os.path.join(self.path.abspath(), 'build', '.ssh') self.ssh_hosts = os.path.join(self.ssh_dir, 'known_hosts') self.ssh_key = os.path.join(self.ssh_dir, os.path.basename(key)) self.ssh_config = os.path.join(self.ssh_dir, 'config') for x in self.ssh_hosts, self.ssh_key, self.ssh_config: if not os.path.isfile(x): if not os.path.isdir(self.ssh_dir): os.makedirs(self.ssh_dir) Utils.writef(self.ssh_key, Utils.readf(key), 'wb') os.chmod(self.ssh_key, 448) Utils.writef(self.ssh_hosts, '\n'.join(self.get_ssh_hosts())) os.chmod(self.ssh_key, 448) Utils.writef(self.ssh_config, 'UserKnownHostsFile %s' % self.ssh_hosts, 'wb') os.chmod(self.ssh_config, 448) self.env.SSH_OPTS = ['-F', self.ssh_config, '-i', self.ssh_key] self.env.append_value('RSYNC_SEND_OPTS', '--exclude=build/.ssh') def skip_unbuildable_variant(self): # skip variants that cannot be built on this OS for k in Options.commands: a, _, b = k.partition('_') if b in Context.g_module.variants: c, _, _ = b.partition('_') if c != Utils.unversioned_sys_platform(): Options.commands.remove(k) def login_to_host(self, login): return re.sub(r'(\w+@)', '', login) def variant_to_login(self, variant): """linux_32_debug -> search env.LINUX_32 and then env.LINUX""" x = variant[:variant.rfind('_')] ret = os.environ.get('REMOTE_' + x.upper(), '') if not ret: x = x[:x.find('_')] ret = os.environ.get('REMOTE_' + x.upper(), '') if not ret: ret = '%s@localhost' % getpass.getuser() return ret def execute(self): global is_remote if not is_remote: self.skip_unbuildable_variant() else: BuildContext.execute(self) def restore(self): self.top_dir = os.path.abspath(Context.g_module.top) 
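# restore() rebuilds only the bare minimum of local state (source/build nodes
# and an empty ConfigSet); the actual configuration data lives on the remote
# hosts, so nothing more is needed on this side.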
self.srcnode = self.root.find_node(self.top_dir) self.path = self.srcnode self.out_dir = os.path.join(self.top_dir, Context.g_module.out) self.bldnode = self.root.make_node(self.out_dir) self.bldnode.mkdir() self.env = ConfigSet.ConfigSet() def extract_groups_of_builds(self): """Return a dict mapping each variants to the commands to build""" self.vgroups = {} for x in reversed(Options.commands): _, _, variant = x.partition('_') if variant in Context.g_module.variants: try: dct = self.vgroups[variant] except KeyError: dct = self.vgroups[variant] = OrderedDict() try: dct[variant].append(x) except KeyError: dct[variant] = [x] Options.commands.remove(x) def custom_options(self, login): try: return Context.g_module.host_options[login] except (AttributeError, KeyError): return {} def recurse(self, *k, **kw): self.env.RSYNC = getattr(Context.g_module, 'rsync', 'rsync -a --chmod=u+rwx') self.env.SSH = getattr(Context.g_module, 'ssh', 'ssh') self.env.SSH_KEYSCAN = getattr(Context.g_module, 'ssh_keyscan', 'ssh-keyscan') try: self.env.WAF = getattr(Context.g_module, 'waf') except AttributeError: try: os.stat('waf') except KeyError: self.fatal('Put a waf file in the directory (./waf-light --tools=remote)') else: self.env.WAF = './waf' self.extract_groups_of_builds() self.setup_private_ssh_key() for k, v in self.vgroups.items(): task = self(rule=rsync_and_ssh, always=True) task.env.login = self.variant_to_login(k) task.env.commands = [] for opt, value in v.items(): task.env.commands += value task.env.variant = task.env.commands[0].partition('_')[2] for opt, value in self.custom_options(k): task.env[opt] = value self.jobs = len(self.vgroups) def make_mkdir_command(self, task): return Utils.subst_vars('${SSH} ${SSH_OPTS} ${login} "rm -fr ${remote_dir} && mkdir -p ${remote_dir}"', task.env) def make_send_command(self, task): return Utils.subst_vars('${RSYNC} ${RSYNC_SEND_OPTS} -e "${SSH} ${SSH_OPTS}" ${local_dir} ${login}:${remote_dir}', task.env) def make_exec_command(self, task): txt = '''${SSH} ${SSH_OPTS} ${login} "cd ${remote_dir} && ${WAF} ${commands}"''' return Utils.subst_vars(txt, task.env) def make_save_command(self, task): return Utils.subst_vars('${RSYNC} ${RSYNC_SAVE_OPTS} -e "${SSH} ${SSH_OPTS}" ${login}:${remote_dir_variant} ${build_dir}', task.env) def rsync_and_ssh(task): # remove a warning task.uid_ = id(task) bld = task.generator.bld task.env.user, _, _ = task.env.login.partition('@') task.env.hdir = Utils.to_hex(Utils.h_list((task.generator.path.abspath(), task.env.variant))) task.env.remote_dir = '~%s/wafremote/%s' % (task.env.user, task.env.hdir) task.env.local_dir = bld.srcnode.abspath() + '/' task.env.remote_dir_variant = '%s/%s/%s' % (task.env.remote_dir, Context.g_module.out, task.env.variant) task.env.build_dir = bld.bldnode.abspath() ret = task.exec_command(bld.make_mkdir_command(task)) if ret: return ret ret = task.exec_command(bld.make_send_command(task)) if ret: return ret ret = task.exec_command(bld.make_exec_command(task)) if ret: return ret ret = task.exec_command(bld.make_save_command(task)) if ret: return ret tdb-1.4.2/third_party/waf/waflib/extras/resx.py0000660000000000000000000000203213444661622021443 0ustar rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 import os from waflib import Task from waflib.TaskGen import extension def configure(conf): conf.find_program(['resgen'], var='RESGEN') conf.env.RESGENFLAGS = '/useSourcePath' @extension('.resx') def resx_file(self, node): """ Bind the .resx extension to a resgen task """ if not getattr(self, 'cs_task', None): self.bld.fatal('resx_file has no link task for use %r' % self) # Given assembly 'Foo' and file 'Sub/Dir/File.resx', create 'Foo.Sub.Dir.File.resources' assembly = getattr(self, 'namespace', os.path.splitext(self.gen)[0]) res = os.path.splitext(node.path_from(self.path))[0].replace('/', '.').replace('\\', '.') out = self.path.find_or_declare(assembly + '.' + res + '.resources') tsk = self.create_task('resgen', node, out) self.cs_task.dep_nodes.extend(tsk.outputs) # dependency self.env.append_value('RESOURCES', tsk.outputs[0].bldpath()) class resgen(Task.Task): """ Compile C# resource files """ color = 'YELLOW' run_str = '${RESGEN} ${RESGENFLAGS} ${SRC} ${TGT}' tdb-1.4.2/third_party/waf/waflib/extras/review.py0000660000000000000000000002160213444661622021767 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Laurent Birtz, 2011 # moved the code into a separate tool (ita) """ There are several things here: - a different command-line option management making options persistent - the review command to display the options set Assumptions: - configuration options are not always added to the right group (and do not count on the users to do it...) - the options are persistent between the executions (waf options are NOT persistent by design), even for the configuration - when the options change, the build is invalidated (forcing a reconfiguration) """ import os, textwrap, shutil from waflib import Logs, Context, ConfigSet, Options, Build, Configure class Odict(dict): """Ordered dictionary""" def __init__(self, data=None): self._keys = [] dict.__init__(self) if data: # we were provided a regular dict if isinstance(data, dict): self.append_from_dict(data) # we were provided a tuple list elif type(data) == list: self.append_from_plist(data) # we were provided invalid input else: raise Exception("expected a dict or a tuple list") def append_from_dict(self, dict): map(self.__setitem__, dict.keys(), dict.values()) def append_from_plist(self, plist): for pair in plist: if len(pair) != 2: raise Exception("invalid pairs list") for (k, v) in plist: self.__setitem__(k, v) def __delitem__(self, key): if not key in self._keys: raise KeyError(key) dict.__delitem__(self, key) self._keys.remove(key) def __setitem__(self, key, item): dict.__setitem__(self, key, item) if key not in self._keys: self._keys.append(key) def clear(self): dict.clear(self) self._keys = [] def copy(self): return Odict(self.plist()) def items(self): return zip(self._keys, self.values()) def keys(self): return list(self._keys) # return a copy of the list def values(self): return map(self.get, self._keys) def plist(self): p = [] for k, v in self.items(): p.append( (k, v) ) return p def __str__(self): buf = [] buf.append("{ ") for k, v in self.items(): buf.append('%r : %r, ' % (k, v)) buf.append("}") return ''.join(buf) review_options = Odict() """ Ordered dictionary mapping configuration option names to their optparse option. """ review_defaults = {} """ Dictionary mapping configuration option names to their default value. """ old_review_set = None """ Review set containing the configuration values before parsing the command line. 
""" new_review_set = None """ Review set containing the configuration values after parsing the command line. """ class OptionsReview(Options.OptionsContext): def __init__(self, **kw): super(self.__class__, self).__init__(**kw) def prepare_config_review(self): """ Find the configuration options that are reviewable, detach their default value from their optparse object and store them into the review dictionaries. """ gr = self.get_option_group('configure options') for opt in gr.option_list: if opt.action != 'store' or opt.dest in ("out", "top"): continue review_options[opt.dest] = opt review_defaults[opt.dest] = opt.default if gr.defaults.has_key(opt.dest): del gr.defaults[opt.dest] opt.default = None def parse_args(self): self.prepare_config_review() self.parser.get_option('--prefix').help = 'installation prefix' super(OptionsReview, self).parse_args() Context.create_context('review').refresh_review_set() class ReviewContext(Context.Context): '''reviews the configuration values''' cmd = 'review' def __init__(self, **kw): super(self.__class__, self).__init__(**kw) out = Options.options.out if not out: out = getattr(Context.g_module, Context.OUT, None) if not out: out = Options.lockfile.replace('.lock-waf', '') self.build_path = (os.path.isabs(out) and self.root or self.path).make_node(out).abspath() """Path to the build directory""" self.cache_path = os.path.join(self.build_path, Build.CACHE_DIR) """Path to the cache directory""" self.review_path = os.path.join(self.cache_path, 'review.cache') """Path to the review cache file""" def execute(self): """ Display and store the review set. Invalidate the cache as required. """ if not self.compare_review_set(old_review_set, new_review_set): self.invalidate_cache() self.store_review_set(new_review_set) print(self.display_review_set(new_review_set)) def invalidate_cache(self): """Invalidate the cache to prevent bad builds.""" try: Logs.warn("Removing the cached configuration since the options have changed") shutil.rmtree(self.cache_path) except: pass def refresh_review_set(self): """ Obtain the old review set and the new review set, and import the new set. """ global old_review_set, new_review_set old_review_set = self.load_review_set() new_review_set = self.update_review_set(old_review_set) self.import_review_set(new_review_set) def load_review_set(self): """ Load and return the review set from the cache if it exists. Otherwise, return an empty set. """ if os.path.isfile(self.review_path): return ConfigSet.ConfigSet(self.review_path) return ConfigSet.ConfigSet() def store_review_set(self, review_set): """ Store the review set specified in the cache. """ if not os.path.isdir(self.cache_path): os.makedirs(self.cache_path) review_set.store(self.review_path) def update_review_set(self, old_set): """ Merge the options passed on the command line with those imported from the previous review set and return the corresponding preview set. """ # Convert value to string. It's important that 'None' maps to # the empty string. 
def val_to_str(val): if val == None or val == '': return '' return str(val) new_set = ConfigSet.ConfigSet() opt_dict = Options.options.__dict__ for name in review_options.keys(): # the option is specified explicitly on the command line if name in opt_dict: # if the option is the default, pretend it was never specified if val_to_str(opt_dict[name]) != val_to_str(review_defaults[name]): new_set[name] = opt_dict[name] # the option was explicitly specified in a previous command elif name in old_set: new_set[name] = old_set[name] return new_set def import_review_set(self, review_set): """ Import the actual value of the reviewable options in the option dictionary, given the current review set. """ for name in review_options.keys(): if name in review_set: value = review_set[name] else: value = review_defaults[name] setattr(Options.options, name, value) def compare_review_set(self, set1, set2): """ Return true if the review sets specified are equal. """ if len(set1.keys()) != len(set2.keys()): return False for key in set1.keys(): if not key in set2 or set1[key] != set2[key]: return False return True def display_review_set(self, review_set): """ Return the string representing the review set specified. """ term_width = Logs.get_term_cols() lines = [] for dest in review_options.keys(): opt = review_options[dest] name = ", ".join(opt._short_opts + opt._long_opts) help = opt.help actual = None if dest in review_set: actual = review_set[dest] default = review_defaults[dest] lines.append(self.format_option(name, help, actual, default, term_width)) return "Configuration:\n\n" + "\n\n".join(lines) + "\n" def format_option(self, name, help, actual, default, term_width): """ Return the string representing the option specified. """ def val_to_str(val): if val == None or val == '': return "(void)" return str(val) max_name_len = 20 sep_len = 2 w = textwrap.TextWrapper() w.width = term_width - 1 if w.width < 60: w.width = 60 out = "" # format the help out += w.fill(help) + "\n" # format the name name_len = len(name) out += Logs.colors.CYAN + name + Logs.colors.NORMAL # set the indentation used when the value wraps to the next line w.subsequent_indent = " ".rjust(max_name_len + sep_len) w.width -= (max_name_len + sep_len) # the name string is too long, switch to the next line if name_len > max_name_len: out += "\n" + w.subsequent_indent # fill the remaining of the line with spaces else: out += " ".rjust(max_name_len + sep_len - name_len) # format the actual value, if there is one if actual != None: out += Logs.colors.BOLD + w.fill(val_to_str(actual)) + Logs.colors.NORMAL + "\n" + w.subsequent_indent # format the default value default_fmt = val_to_str(default) if actual != None: default_fmt = "default: " + default_fmt out += Logs.colors.NORMAL + w.fill(default_fmt) + Logs.colors.NORMAL return out # Monkey-patch ConfigurationContext.execute() to have it store the review set. 
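# The wrapper runs the original execute() first and then persists the current
# review set, so a plain "waf configure" keeps the review cache in sync with
# the options that were actually used.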
old_configure_execute = Configure.ConfigurationContext.execute def new_configure_execute(self): old_configure_execute(self) Context.create_context('review').store_review_set(new_review_set) Configure.ConfigurationContext.execute = new_configure_execute tdb-1.4.2/third_party/waf/waflib/extras/rst.py0000660000000000000000000001544713444661622021305 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Jérôme Carretero, 2013 (zougloub) """ reStructuredText support (experimental) Example:: def configure(conf): conf.load('rst') if not conf.env.RST2HTML: conf.fatal('The program rst2html is required') def build(bld): bld( features = 'rst', type = 'rst2html', # rst2html, rst2pdf, ... source = 'index.rst', # mandatory, the source deps = 'image.png', # to give additional non-trivial dependencies ) By default the tool looks for a set of programs in PATH. The tools are defined in `rst_progs`. To configure with a special program use:: $ RST2HTML=/path/to/rst2html waf configure This tool is experimental; don't hesitate to contribute to it. """ import re from waflib import Node, Utils, Task, Errors, Logs from waflib.TaskGen import feature, before_method rst_progs = "rst2html rst2xetex rst2latex rst2xml rst2pdf rst2s5 rst2man rst2odt rst2rtf".split() def parse_rst_node(task, node, nodes, names, seen, dirs=None): # TODO add extensibility, to handle custom rst include tags... if dirs is None: dirs = (node.parent,node.get_bld().parent) if node in seen: return seen.append(node) code = node.read() re_rst = re.compile(r'^\s*.. ((?P<uses>\|\S+\|) )?(?P<type>include|image|figure):: (?P<file>.*)$', re.M) for match in re_rst.finditer(code): ipath = match.group('file') itype = match.group('type') Logs.debug('rst: visiting %s: %s', itype, ipath) found = False for d in dirs: Logs.debug('rst: looking for %s in %s', ipath, d.abspath()) found = d.find_node(ipath) if found: Logs.debug('rst: found %s as %s', ipath, found.abspath()) nodes.append((itype, found)) if itype == 'include': parse_rst_node(task, found, nodes, names, seen) break if not found: names.append((itype, ipath)) class docutils(Task.Task): """ Compile a rst file. """ def scan(self): """ A recursive regex-based scanner that finds rst dependencies.
""" nodes = [] names = [] seen = [] node = self.inputs[0] if not node: return (nodes, names) parse_rst_node(self, node, nodes, names, seen) Logs.debug('rst: %r: found the following file deps: %r', self, nodes) if names: Logs.warn('rst: %r: could not find the following file deps: %r', self, names) return ([v for (t,v) in nodes], [v for (t,v) in names]) def check_status(self, msg, retcode): """ Check an exit status and raise an error with a particular message :param msg: message to display if the code is non-zero :type msg: string :param retcode: condition :type retcode: boolean """ if retcode != 0: raise Errors.WafError('%r command exit status %r' % (msg, retcode)) def run(self): """ Runs the rst compilation using docutils """ raise NotImplementedError() class rst2html(docutils): color = 'BLUE' def __init__(self, *args, **kw): docutils.__init__(self, *args, **kw) self.command = self.generator.env.RST2HTML self.attributes = ['stylesheet'] def scan(self): nodes, names = docutils.scan(self) for attribute in self.attributes: stylesheet = getattr(self.generator, attribute, None) if stylesheet is not None: ssnode = self.generator.to_nodes(stylesheet)[0] nodes.append(ssnode) Logs.debug('rst: adding dep to %s %s', attribute, stylesheet) return nodes, names def run(self): cwdn = self.outputs[0].parent src = self.inputs[0].path_from(cwdn) dst = self.outputs[0].path_from(cwdn) cmd = self.command + [src, dst] cmd += Utils.to_list(getattr(self.generator, 'options', [])) for attribute in self.attributes: stylesheet = getattr(self.generator, attribute, None) if stylesheet is not None: stylesheet = self.generator.to_nodes(stylesheet)[0] cmd += ['--%s' % attribute, stylesheet.path_from(cwdn)] return self.exec_command(cmd, cwd=cwdn.abspath()) class rst2s5(rst2html): def __init__(self, *args, **kw): rst2html.__init__(self, *args, **kw) self.command = self.generator.env.RST2S5 self.attributes = ['stylesheet'] class rst2latex(rst2html): def __init__(self, *args, **kw): rst2html.__init__(self, *args, **kw) self.command = self.generator.env.RST2LATEX self.attributes = ['stylesheet'] class rst2xetex(rst2html): def __init__(self, *args, **kw): rst2html.__init__(self, *args, **kw) self.command = self.generator.env.RST2XETEX self.attributes = ['stylesheet'] class rst2pdf(docutils): color = 'BLUE' def run(self): cwdn = self.outputs[0].parent src = self.inputs[0].path_from(cwdn) dst = self.outputs[0].path_from(cwdn) cmd = self.generator.env.RST2PDF + [src, '-o', dst] cmd += Utils.to_list(getattr(self.generator, 'options', [])) return self.exec_command(cmd, cwd=cwdn.abspath()) @feature('rst') @before_method('process_source') def apply_rst(self): """ Create :py:class:`rst` or other rst-related task objects """ if self.target: if isinstance(self.target, Node.Node): tgt = self.target elif isinstance(self.target, str): tgt = self.path.get_bld().make_node(self.target) else: self.bld.fatal("rst: Don't know how to build target name %s which is not a string or Node for %s" % (self.target, self)) else: tgt = None tsk_type = getattr(self, 'type', None) src = self.to_nodes(self.source) assert len(src) == 1 src = src[0] if tsk_type is not None and tgt is None: if tsk_type.startswith('rst2'): ext = tsk_type[4:] else: self.bld.fatal("rst: Could not detect the output file extension for %s" % self) tgt = src.change_ext('.%s' % ext) elif tsk_type is None and tgt is not None: out = tgt.name ext = out[out.rfind('.')+1:] self.type = 'rst2' + ext elif tsk_type is not None and tgt is not None: # the user knows what he wants pass else: 
self.bld.fatal("rst: Need to indicate task type or target name for %s" % self) deps_lst = [] if getattr(self, 'deps', None): deps = self.to_list(self.deps) for filename in deps: n = self.path.find_resource(filename) if not n: self.bld.fatal('Could not find %r for %r' % (filename, self)) if not n in deps_lst: deps_lst.append(n) try: task = self.create_task(self.type, src, tgt) except KeyError: self.bld.fatal("rst: Task of type %s not implemented (created by %s)" % (self.type, self)) task.env = self.env # add the manual dependencies if deps_lst: try: lst = self.bld.node_deps[task.uid()] for n in deps_lst: if not n in lst: lst.append(n) except KeyError: self.bld.node_deps[task.uid()] = deps_lst inst_to = getattr(self, 'install_path', None) if inst_to: self.install_task = self.add_install_files(install_to=inst_to, install_from=task.outputs[:]) self.source = [] def configure(self): """ Try to find the rst programs. Do not raise any error if they are not found. You'll have to use additional code in configure() to die if programs were not found. """ for p in rst_progs: self.find_program(p, mandatory=False) tdb-1.4.2/third_party/waf/waflib/extras/run_do_script.py0000660000000000000000000001157513527011455023343 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Hans-Martin von Gaudecker, 2012 """ Run a Stata do-script in the directory specified by **ctx.bldnode**. The first and only argument will be the name of the do-script (no extension), which can be accessed inside the do-script by the local macro `1'. Useful for keeping a log file. The tool uses the log file that is automatically kept by Stata only for error-catching purposes, it will be destroyed if the task finished without error. In case of an error in **some_script.do**, you can inspect it as **some_script.log** in the **ctx.bldnode** directory. Note that Stata will not return an error code if it exits abnormally -- catching errors relies on parsing the log file mentioned before. Should the parser behave incorrectly please send an email to hmgaudecker [at] gmail. **WARNING** The tool will not work if multiple do-scripts of the same name---but in different directories---are run at the same time! Avoid this situation. Usage:: ctx(features='run_do_script', source='some_script.do', target=['some_table.tex', 'some_figure.eps'], deps='some_data.csv') """ import os, re, sys from waflib import Task, TaskGen, Logs if sys.platform == 'darwin': STATA_COMMANDS = ['Stata64MP', 'StataMP', 'Stata64SE', 'StataSE', 'Stata64', 'Stata'] STATAFLAGS = '-e -q do' STATAENCODING = 'MacRoman' elif sys.platform.startswith('linux'): STATA_COMMANDS = ['stata-mp', 'stata-se', 'stata'] STATAFLAGS = '-b -q do' # Not sure whether this is correct... STATAENCODING = 'Latin-1' elif sys.platform.lower().startswith('win'): STATA_COMMANDS = ['StataMP-64', 'StataMP-ia', 'StataMP', 'StataSE-64', 'StataSE-ia', 'StataSE', 'Stata-64', 'Stata-ia', 'Stata.e', 'WMPSTATA', 'WSESTATA', 'WSTATA'] STATAFLAGS = '/e do' STATAENCODING = 'Latin-1' else: raise Exception("Unknown sys.platform: %s " % sys.platform) def configure(ctx): ctx.find_program(STATA_COMMANDS, var='STATACMD', errmsg="""\n No Stata executable found!\n\n If Stata is needed:\n 1) Check the settings of your system path. 
2) Note we are looking for Stata executables called: %s If yours has a different name, please report to hmgaudecker [at] gmail\n Else:\n Do not load the 'run_do_script' tool in the main wscript.\n\n""" % STATA_COMMANDS) ctx.env.STATAFLAGS = STATAFLAGS ctx.env.STATAENCODING = STATAENCODING class run_do_script_base(Task.Task): """Run a Stata do-script from the bldnode directory.""" run_str = '"${STATACMD}" ${STATAFLAGS} "${SRC[0].abspath()}" "${DOFILETRUNK}"' shell = True class run_do_script(run_do_script_base): """Use the log file automatically kept by Stata for error-catching. Erase it if the task finished without error. If not, it will show up as do_script.log in the bldnode directory. """ def run(self): run_do_script_base.run(self) ret, log_tail = self.check_erase_log_file() if ret: Logs.error("""Running Stata on %r failed with code %r.\n\nCheck the log file %s, last 10 lines\n\n%s\n\n\n""", self.inputs[0], ret, self.env.LOGFILEPATH, log_tail) return ret def check_erase_log_file(self): """Parse Stata's default log file and erase it if everything okay. Parser is based on Brendan Halpin's shell script found here: http://teaching.sociology.ul.ie/bhalpin/wordpress/?p=122 """ if sys.version_info.major >= 3: kwargs = {'file': self.env.LOGFILEPATH, 'mode': 'r', 'encoding': self.env.STATAENCODING} else: kwargs = {'name': self.env.LOGFILEPATH, 'mode': 'r'} with open(**kwargs) as log: log_tail = log.readlines()[-10:] for line in log_tail: error_found = re.match(r"r\(([0-9]+)\)", line) if error_found: return error_found.group(1), ''.join(log_tail) else: pass # Only end up here if the parser did not identify an error. os.remove(self.env.LOGFILEPATH) return None, None @TaskGen.feature('run_do_script') @TaskGen.before_method('process_source') def apply_run_do_script(tg): """Task generator customising the options etc. to call Stata in batch mode for running a do-script. """ # Convert sources and targets to nodes src_node = tg.path.find_resource(tg.source) tgt_nodes = [tg.path.find_or_declare(t) for t in tg.to_list(tg.target)] tsk = tg.create_task('run_do_script', src=src_node, tgt=tgt_nodes) tsk.env.DOFILETRUNK = os.path.splitext(src_node.name)[0] tsk.env.LOGFILEPATH = os.path.join(tg.bld.bldnode.abspath(), '%s.log' % (tsk.env.DOFILETRUNK)) # dependencies (if the attribute 'deps' changes, trigger a recompilation) for x in tg.to_list(getattr(tg, 'deps', [])): node = tg.path.find_resource(x) if not node: tg.bld.fatal('Could not find dependency %r for running %r' % (x, src_node.abspath())) tsk.dep_nodes.append(node) Logs.debug('deps: found dependencies %r for running %r', tsk.dep_nodes, src_node.abspath()) # Bypass the execution of process_source by setting the source to an empty list tg.source = [] tdb-1.4.2/third_party/waf/waflib/extras/run_m_script.py0000660000000000000000000000577213444661622023204 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Hans-Martin von Gaudecker, 2012 """ Run a Matlab script. Note that the script is run in the directory where it lives -- Matlab won't allow it any other way. For error-catching purposes, keep an own log-file that is destroyed if the task finished without error. If not, it will show up as mscript_[index].log in the bldnode directory. 
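The startup flags default to ``-wait -nojvm -nosplash -minimize`` (set at
configuration time); as a sketch, they could be overridden after loading the
tool, with flags adjusted to the local Matlab installation::

    def configure(ctx):
        ctx.load('run_m_script')
        ctx.env.MATLABFLAGS = '-nodisplay -nosplash'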
Usage:: ctx(features='run_m_script', source='some_script.m', target=['some_table.tex', 'some_figure.eps'], deps='some_data.mat') """ import os, sys from waflib import Task, TaskGen, Logs MATLAB_COMMANDS = ['matlab'] def configure(ctx): ctx.find_program(MATLAB_COMMANDS, var='MATLABCMD', errmsg = """\n No Matlab executable found!\n\n If Matlab is needed:\n 1) Check the settings of your system path. 2) Note we are looking for Matlab executables called: %s If yours has a different name, please report to hmgaudecker [at] gmail\n Else:\n Do not load the 'run_m_script' tool in the main wscript.\n\n""" % MATLAB_COMMANDS) ctx.env.MATLABFLAGS = '-wait -nojvm -nosplash -minimize' class run_m_script_base(Task.Task): """Run a Matlab script.""" run_str = '"${MATLABCMD}" ${MATLABFLAGS} -logfile "${LOGFILEPATH}" -r "try, ${MSCRIPTTRUNK}, exit(0), catch err, disp(err.getReport()), exit(1), end"' shell = True class run_m_script(run_m_script_base): """Erase the Matlab overall log file if everything went okay, else raise an error and print its 10 last lines. """ def run(self): ret = run_m_script_base.run(self) logfile = self.env.LOGFILEPATH if ret: mode = 'r' if sys.version_info.major >= 3: mode = 'rb' with open(logfile, mode=mode) as f: tail = f.readlines()[-10:] Logs.error("""Running Matlab on %r returned the error %r\n\nCheck the log file %s, last 10 lines\n\n%s\n\n\n""", self.inputs[0], ret, logfile, '\n'.join(tail)) else: os.remove(logfile) return ret @TaskGen.feature('run_m_script') @TaskGen.before_method('process_source') def apply_run_m_script(tg): """Task generator customising the options etc. to call Matlab in batch mode for running a m-script. """ # Convert sources and targets to nodes src_node = tg.path.find_resource(tg.source) tgt_nodes = [tg.path.find_or_declare(t) for t in tg.to_list(tg.target)] tsk = tg.create_task('run_m_script', src=src_node, tgt=tgt_nodes) tsk.cwd = src_node.parent.abspath() tsk.env.MSCRIPTTRUNK = os.path.splitext(src_node.name)[0] tsk.env.LOGFILEPATH = os.path.join(tg.bld.bldnode.abspath(), '%s_%d.log' % (tsk.env.MSCRIPTTRUNK, tg.idx)) # dependencies (if the attribute 'deps' changes, trigger a recompilation) for x in tg.to_list(getattr(tg, 'deps', [])): node = tg.path.find_resource(x) if not node: tg.bld.fatal('Could not find dependency %r for running %r' % (x, src_node.abspath())) tsk.dep_nodes.append(node) Logs.debug('deps: found dependencies %r for running %r', tsk.dep_nodes, src_node.abspath()) # Bypass the execution of process_source by setting the source to an empty list tg.source = [] tdb-1.4.2/third_party/waf/waflib/extras/run_py_script.py0000660000000000000000000000741413444661622023373 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Hans-Martin von Gaudecker, 2012 """ Run a Python script in the directory specified by **ctx.bldnode**. Select a Python version by specifying the **version** keyword for the task generator instance as integer 2 or 3. Default is 3. If the build environment has an attribute "PROJECT_PATHS" with a key "PROJECT_ROOT", its value will be appended to the PYTHONPATH. Same a string passed to the optional **add_to_pythonpath** keyword (appended after the PROJECT_ROOT). 
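As a sketch of the PROJECT_ROOT mechanism (the value must be a waf node, since
its ``abspath()`` is taken; the ``src`` folder is a hypothetical layout)::

    def build(bld):
        bld.env.PROJECT_PATHS = {'PROJECT_ROOT': bld.srcnode.find_node('src')}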
Usage:: ctx(features='run_py_script', version=3, source='some_script.py', target=['some_table.tex', 'some_figure.eps'], deps='some_data.csv', add_to_pythonpath='src/some/library') """ import os, re from waflib import Task, TaskGen, Logs def configure(conf): """TODO: Might need to be updated for Windows once "PEP 397":http://www.python.org/dev/peps/pep-0397/ is settled. """ conf.find_program('python', var='PY2CMD', mandatory=False) conf.find_program('python3', var='PY3CMD', mandatory=False) if not conf.env.PY2CMD and not conf.env.PY3CMD: conf.fatal("No Python interpreter found!") class run_py_2_script(Task.Task): """Run a Python 2 script.""" run_str = '${PY2CMD} ${SRC[0].abspath()}' shell=True class run_py_3_script(Task.Task): """Run a Python 3 script.""" run_str = '${PY3CMD} ${SRC[0].abspath()}' shell=True @TaskGen.feature('run_py_script') @TaskGen.before_method('process_source') def apply_run_py_script(tg): """Task generator for running either Python 2 or Python 3 on a single script. Attributes: * source -- A **single** source node or string. (required) * target -- A single target or list of targets (nodes or strings) * deps -- A single dependency or list of dependencies (nodes or strings) * add_to_pythonpath -- A string that will be appended to the PYTHONPATH environment variable If the build environment has an attribute "PROJECT_PATHS" with a key "PROJECT_ROOT", its value will be appended to the PYTHONPATH. """ # Set the Python version to use, default to 3. v = getattr(tg, 'version', 3) if v not in (2, 3): raise ValueError("Specify the 'version' attribute for run_py_script task generator as integer 2 or 3.\n Got: %s" %v) # Convert sources and targets to nodes src_node = tg.path.find_resource(tg.source) tgt_nodes = [tg.path.find_or_declare(t) for t in tg.to_list(tg.target)] # Create the task. tsk = tg.create_task('run_py_%d_script' %v, src=src_node, tgt=tgt_nodes) # custom execution environment # TODO use a list and os.sep.join(lst) at the end instead of concatenating strings tsk.env.env = dict(os.environ) tsk.env.env['PYTHONPATH'] = tsk.env.env.get('PYTHONPATH', '') project_paths = getattr(tsk.env, 'PROJECT_PATHS', None) if project_paths and 'PROJECT_ROOT' in project_paths: tsk.env.env['PYTHONPATH'] += os.pathsep + project_paths['PROJECT_ROOT'].abspath() if getattr(tg, 'add_to_pythonpath', None): tsk.env.env['PYTHONPATH'] += os.pathsep + tg.add_to_pythonpath # Clean up the PYTHONPATH -- replace double occurrences of path separator tsk.env.env['PYTHONPATH'] = re.sub(os.pathsep + '+', os.pathsep, tsk.env.env['PYTHONPATH']) # Clean up the PYTHONPATH -- doesn't like starting with path separator if tsk.env.env['PYTHONPATH'].startswith(os.pathsep): tsk.env.env['PYTHONPATH'] = tsk.env.env['PYTHONPATH'][1:] # dependencies (if the attribute 'deps' changes, trigger a recompilation) for x in tg.to_list(getattr(tg, 'deps', [])): node = tg.path.find_resource(x) if not node: tg.bld.fatal('Could not find dependency %r for running %r' % (x, src_node.abspath())) tsk.dep_nodes.append(node) Logs.debug('deps: found dependencies %r for running %r', tsk.dep_nodes, src_node.abspath()) # Bypass the execution of process_source by setting the source to an empty list tg.source = [] tdb-1.4.2/third_party/waf/waflib/extras/run_r_script.py0000660000000000000000000000531613444661622023203 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Hans-Martin von Gaudecker, 2012 """ Run a R script in the directory specified by **ctx.bldnode**. 
For error-catching purposes, keep an own log-file that is destroyed if the task finished without error. If not, it will show up as rscript_[index].log in the bldnode directory. Usage:: ctx(features='run_r_script', source='some_script.r', target=['some_table.tex', 'some_figure.eps'], deps='some_data.csv') """ import os, sys from waflib import Task, TaskGen, Logs R_COMMANDS = ['RTerm', 'R', 'r'] def configure(ctx): ctx.find_program(R_COMMANDS, var='RCMD', errmsg = """\n No R executable found!\n\n If R is needed:\n 1) Check the settings of your system path. 2) Note we are looking for R executables called: %s If yours has a different name, please report to hmgaudecker [at] gmail\n Else:\n Do not load the 'run_r_script' tool in the main wscript.\n\n""" % R_COMMANDS) ctx.env.RFLAGS = 'CMD BATCH --slave' class run_r_script_base(Task.Task): """Run a R script.""" run_str = '"${RCMD}" ${RFLAGS} "${SRC[0].abspath()}" "${LOGFILEPATH}"' shell = True class run_r_script(run_r_script_base): """Erase the R overall log file if everything went okay, else raise an error and print its 10 last lines. """ def run(self): ret = run_r_script_base.run(self) logfile = self.env.LOGFILEPATH if ret: mode = 'r' if sys.version_info.major >= 3: mode = 'rb' with open(logfile, mode=mode) as f: tail = f.readlines()[-10:] Logs.error("""Running R on %r returned the error %r\n\nCheck the log file %s, last 10 lines\n\n%s\n\n\n""", self.inputs[0], ret, logfile, '\n'.join(tail)) else: os.remove(logfile) return ret @TaskGen.feature('run_r_script') @TaskGen.before_method('process_source') def apply_run_r_script(tg): """Task generator customising the options etc. to call R in batch mode for running a R script. """ # Convert sources and targets to nodes src_node = tg.path.find_resource(tg.source) tgt_nodes = [tg.path.find_or_declare(t) for t in tg.to_list(tg.target)] tsk = tg.create_task('run_r_script', src=src_node, tgt=tgt_nodes) tsk.env.LOGFILEPATH = os.path.join(tg.bld.bldnode.abspath(), '%s_%d.log' % (os.path.splitext(src_node.name)[0], tg.idx)) # dependencies (if the attribute 'deps' changes, trigger a recompilation) for x in tg.to_list(getattr(tg, 'deps', [])): node = tg.path.find_resource(x) if not node: tg.bld.fatal('Could not find dependency %r for running %r' % (x, src_node.abspath())) tsk.dep_nodes.append(node) Logs.debug('deps: found dependencies %r for running %r', tsk.dep_nodes, src_node.abspath()) # Bypass the execution of process_source by setting the source to an empty list tg.source = [] tdb-1.4.2/third_party/waf/waflib/extras/sas.py0000660000000000000000000000363213444661622021257 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Mark Coggeshall, 2010 "SAS support" import os from waflib import Task, Errors, Logs from waflib.TaskGen import feature, before_method sas_fun, _ = Task.compile_fun('sas -sysin ${SRCFILE} -log ${LOGFILE} -print ${LSTFILE}', shell=False) class sas(Task.Task): vars = ['SAS', 'SASFLAGS'] def run(task): command = 'SAS' fun = sas_fun node = task.inputs[0] logfilenode = node.change_ext('.log') lstfilenode = node.change_ext('.lst') # set the cwd task.cwd = task.inputs[0].parent.get_src().abspath() Logs.debug('runner: %r on %r', command, node) SASINPUTS = node.parent.get_bld().abspath() + os.pathsep + node.parent.get_src().abspath() + os.pathsep task.env.env = {'SASINPUTS': SASINPUTS} task.env.SRCFILE = node.abspath() task.env.LOGFILE = logfilenode.abspath() task.env.LSTFILE = lstfilenode.abspath() ret = fun(task) if ret: Logs.error('Running %s on %r returned a non-zero exit', 
command, node) Logs.error('SRCFILE = %r', node) Logs.error('LOGFILE = %r', logfilenode) Logs.error('LSTFILE = %r', lstfilenode) return ret @feature('sas') @before_method('process_source') def apply_sas(self): if not getattr(self, 'type', None) in ('sas',): self.type = 'sas' self.env['logdir'] = getattr(self, 'logdir', 'log') self.env['lstdir'] = getattr(self, 'lstdir', 'lst') deps_lst = [] if getattr(self, 'deps', None): deps = self.to_list(self.deps) for filename in deps: n = self.path.find_resource(filename) if not n: n = self.bld.root.find_resource(filename) if not n: raise Errors.WafError('cannot find input file %s for processing' % filename) if not n in deps_lst: deps_lst.append(n) for node in self.to_nodes(self.source): if self.type == 'sas': task = self.create_task('sas', src=node) task.dep_nodes = deps_lst self.source = [] def configure(self): self.find_program('sas', var='SAS', mandatory=False) tdb-1.4.2/third_party/waf/waflib/extras/satellite_assembly.py0000660000000000000000000000416113444661622024354 0ustar rootroot00000000000000#!/usr/bin/python # encoding: utf-8 # vim: tabstop=4 noexpandtab """ Create a satellite assembly from "*.??.txt" files. ?? stands for a language code. The projects Resources subfolder contains resources.??.txt string files for several languages. The build folder will hold the satellite assemblies as ./??/ExeName.resources.dll #gen becomes template (It is called gen because it also uses resx.py). bld(source='Resources/resources.de.txt',gen=ExeName) """ import os, re from waflib import Task from waflib.TaskGen import feature,before_method class al(Task.Task): run_str = '${AL} ${ALFLAGS}' @feature('satellite_assembly') @before_method('process_source') def satellite_assembly(self): if not getattr(self, 'gen', None): self.bld.fatal('satellite_assembly needs a template assembly provided with the "gen" parameter') res_lang = re.compile(r'(.*)\.(\w\w)\.(?:resx|txt)',flags=re.I) # self.source can contain node objects, so this will break in one way or another self.source = self.to_list(self.source) for i, x in enumerate(self.source): #x = 'resources/resources.de.resx' #x = 'resources/resources.de.txt' mo = res_lang.match(x) if mo: template = os.path.splitext(self.gen)[0] templatedir, templatename = os.path.split(template) res = mo.group(1) lang = mo.group(2) #./Resources/resources.de.resources resources = self.path.find_or_declare(res+ '.' 
+ lang + '.resources') self.create_task('resgen', self.to_nodes(x), [resources]) #./de/Exename.resources.dll satellite = self.path.find_or_declare(os.path.join(templatedir,lang,templatename) + '.resources.dll') tsk = self.create_task('al',[resources],[satellite]) tsk.env.append_value('ALFLAGS','/template:'+os.path.join(self.path.relpath(),self.gen)) tsk.env.append_value('ALFLAGS','/embed:'+resources.relpath()) tsk.env.append_value('ALFLAGS','/culture:'+lang) tsk.env.append_value('ALFLAGS','/out:'+satellite.relpath()) self.source[i] = None # remove the None elements that we just substituted self.source = list(filter(lambda x:x, self.source)) def configure(ctx): ctx.find_program('al', var='AL', mandatory=True) ctx.load('resx') tdb-1.4.2/third_party/waf/waflib/extras/scala.py0000660000000000000000000000637713444661622021565 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2010 (ita) """ Scala support scalac outputs files a bit where it wants to """ import os from waflib import Task, Utils, Node from waflib.TaskGen import feature, before_method, after_method from waflib.Tools import ccroot ccroot.USELIB_VARS['scalac'] = set(['CLASSPATH', 'SCALACFLAGS']) from waflib.Tools import javaw @feature('scalac') @before_method('process_source') def apply_scalac(self): Utils.def_attrs(self, jarname='', classpath='', sourcepath='.', srcdir='.', jar_mf_attributes={}, jar_mf_classpath=[]) outdir = getattr(self, 'outdir', None) if outdir: if not isinstance(outdir, Node.Node): outdir = self.path.get_bld().make_node(self.outdir) else: outdir = self.path.get_bld() outdir.mkdir() self.env['OUTDIR'] = outdir.abspath() self.scalac_task = tsk = self.create_task('scalac') tmp = [] srcdir = getattr(self, 'srcdir', '') if isinstance(srcdir, Node.Node): srcdir = [srcdir] for x in Utils.to_list(srcdir): if isinstance(x, Node.Node): y = x else: y = self.path.find_dir(x) if not y: self.bld.fatal('Could not find the folder %s from %s' % (x, self.path)) tmp.append(y) tsk.srcdir = tmp # reuse some code feature('scalac')(javaw.use_javac_files) after_method('apply_scalac')(javaw.use_javac_files) feature('scalac')(javaw.set_classpath) after_method('apply_scalac', 'use_scalac_files')(javaw.set_classpath) SOURCE_RE = '**/*.scala' class scalac(javaw.javac): color = 'GREEN' vars = ['CLASSPATH', 'SCALACFLAGS', 'SCALAC', 'OUTDIR'] def runnable_status(self): """ Wait for dependent tasks to be complete, then read the file system to find the input nodes. 
""" for t in self.run_after: if not t.hasrun: return Task.ASK_LATER if not self.inputs: global SOURCE_RE self.inputs = [] for x in self.srcdir: self.inputs.extend(x.ant_glob(SOURCE_RE, remove=False)) return super(javaw.javac, self).runnable_status() def run(self): """ Execute the scalac compiler """ env = self.env gen = self.generator bld = gen.bld wd = bld.bldnode.abspath() def to_list(xx): if isinstance(xx, str): return [xx] return xx self.last_cmd = lst = [] lst.extend(to_list(env['SCALAC'])) lst.extend(['-classpath']) lst.extend(to_list(env['CLASSPATH'])) lst.extend(['-d']) lst.extend(to_list(env['OUTDIR'])) lst.extend(to_list(env['SCALACFLAGS'])) lst.extend([a.abspath() for a in self.inputs]) lst = [x for x in lst if x] try: self.out = self.generator.bld.cmd_and_log(lst, cwd=wd, env=env.env or None, output=0, quiet=0)[1] except: self.generator.bld.cmd_and_log(lst, cwd=wd, env=env.env or None) def configure(self): """ Detect the scalac program """ # If SCALA_HOME is set, we prepend it to the path list java_path = self.environ['PATH'].split(os.pathsep) v = self.env if 'SCALA_HOME' in self.environ: java_path = [os.path.join(self.environ['SCALA_HOME'], 'bin')] + java_path self.env['SCALA_HOME'] = [self.environ['SCALA_HOME']] for x in 'scalac scala'.split(): self.find_program(x, var=x.upper(), path_list=java_path) if 'CLASSPATH' in self.environ: v['CLASSPATH'] = self.environ['CLASSPATH'] v.SCALACFLAGS = ['-verbose'] if not v['SCALAC']: self.fatal('scalac is required for compiling scala classes') tdb-1.4.2/third_party/waf/waflib/extras/slow_qt4.py0000660000000000000000000000537613444661622022254 0ustar rootroot00000000000000#! /usr/bin/env python # Thomas Nagy, 2011 (ita) """ Create _moc.cpp files The builds are 30-40% faster when .moc files are included, you should NOT use this tool. If you really really want it: def configure(conf): conf.load('compiler_cxx qt4') conf.load('slow_qt4') See playground/slow_qt/wscript for a complete example. """ from waflib.TaskGen import extension from waflib import Task import waflib.Tools.qt4 import waflib.Tools.cxx @extension(*waflib.Tools.qt4.EXT_QT4) def cxx_hook(self, node): return self.create_compiled_task('cxx_qt', node) class cxx_qt(Task.classes['cxx']): def runnable_status(self): ret = Task.classes['cxx'].runnable_status(self) if ret != Task.ASK_LATER and not getattr(self, 'moc_done', None): try: cache = self.generator.moc_cache except AttributeError: cache = self.generator.moc_cache = {} deps = self.generator.bld.node_deps[self.uid()] for x in [self.inputs[0]] + deps: if x.read().find('Q_OBJECT') > 0: # process "foo.h -> foo.moc" only if "foo.cpp" is in the sources for the current task generator # this code will work because it is in the main thread (runnable_status) if x.name.rfind('.') > -1: # a .h file... name = x.name[:x.name.rfind('.')] for tsk in self.generator.compiled_tasks: if tsk.inputs and tsk.inputs[0].name.startswith(name): break else: # no corresponding file, continue continue # the file foo.cpp could be compiled for a static and a shared library - hence the %number in the name cxx_node = x.parent.get_bld().make_node(x.name.replace('.', '_') + '_%d_moc.cpp' % self.generator.idx) if cxx_node in cache: continue cache[cxx_node] = self tsk = Task.classes['moc'](env=self.env, generator=self.generator) tsk.set_inputs(x) tsk.set_outputs(cxx_node) if x.name.endswith('.cpp'): # moc is trying to be too smart but it is too dumb: # why forcing the #include when Q_OBJECT is in the cpp file? 
gen = self.generator.bld.producer gen.outstanding.append(tsk) gen.total += 1 self.set_run_after(tsk) else: cxxtsk = Task.classes['cxx'](env=self.env, generator=self.generator) cxxtsk.set_inputs(tsk.outputs) cxxtsk.set_outputs(cxx_node.change_ext('.o')) cxxtsk.set_run_after(tsk) try: self.more_tasks.extend([tsk, cxxtsk]) except AttributeError: self.more_tasks = [tsk, cxxtsk] try: link = self.generator.link_task except AttributeError: pass else: link.set_run_after(cxxtsk) link.inputs.extend(cxxtsk.outputs) link.inputs.sort(key=lambda x: x.abspath()) self.moc_done = True for t in self.run_after: if not t.hasrun: return Task.ASK_LATER return ret tdb-1.4.2/third_party/waf/waflib/extras/softlink_libs.py0000660000000000000000000000452513444661622023335 0ustar rootroot00000000000000#! /usr/bin/env python # per rosengren 2011 from waflib.TaskGen import feature, after_method from waflib.Task import Task, always_run from os.path import basename, isabs from os import tmpfile, linesep def options(opt): grp = opt.add_option_group('Softlink Libraries Options') grp.add_option('--exclude', default='/usr/lib,/lib', help='No symbolic links are created for libs within [%default]') def configure(cnf): cnf.find_program('ldd') if not cnf.env.SOFTLINK_EXCLUDE: cnf.env.SOFTLINK_EXCLUDE = cnf.options.exclude.split(',') @feature('softlink_libs') @after_method('process_rule') def add_finder(self): tgt = self.path.find_or_declare(self.target) self.create_task('sll_finder', tgt=tgt) self.create_task('sll_installer', tgt=tgt) always_run(sll_installer) class sll_finder(Task): ext_out = 'softlink_libs' def run(self): bld = self.generator.bld linked=[] target_paths = [] for g in bld.groups: for tgen in g: # FIXME it might be better to check if there is a link_task (getattr?) target_paths += [tgen.path.get_bld().bldpath()] linked += [t.outputs[0].bldpath() for t in getattr(tgen, 'tasks', []) if t.__class__.__name__ in ['cprogram', 'cshlib', 'cxxprogram', 'cxxshlib']] lib_list = [] if len(linked): cmd = [self.env.LDD] + linked # FIXME add DYLD_LIBRARY_PATH+PATH for osx+win32 ldd_env = {'LD_LIBRARY_PATH': ':'.join(target_paths + self.env.LIBPATH)} # FIXME the with syntax will not work in python 2 with tmpfile() as result: self.exec_command(cmd, env=ldd_env, stdout=result) result.seek(0) for line in result.readlines(): words = line.split() if len(words) < 3 or words[1] != '=>': continue lib = words[2] if lib == 'not': continue if any([lib.startswith(p) for p in [bld.bldnode.abspath(), '('] + self.env.SOFTLINK_EXCLUDE]): continue if not isabs(lib): continue lib_list.append(lib) lib_list = sorted(set(lib_list)) self.outputs[0].write(linesep.join(lib_list + self.env.DYNAMIC_LIBS)) return 0 class sll_installer(Task): ext_in = 'softlink_libs' def run(self): tgt = self.outputs[0] self.generator.bld.install_files('${LIBDIR}', tgt, postpone=False) lib_list=tgt.read().split() for lib in lib_list: self.generator.bld.symlink_as('${LIBDIR}/'+basename(lib), lib, postpone=False) return 0 tdb-1.4.2/third_party/waf/waflib/extras/sphinx.py0000660000000000000000000000547413527011455022003 0ustar rootroot00000000000000"""Support for Sphinx documentation This is a wrapper for sphinx-build program. Please note that sphinx-build supports only one output format which can passed to build via sphinx_output_format attribute. The default output format is html. 
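To produce several formats, declare one task generator per format; as a
sketch, following the attributes shown below::

    for fmt in ('html', 'man'):
        bld(features='sphinx', sphinx_source='sources', sphinx_output_format=fmt)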
Example wscript: def configure(cnf): conf.load('sphinx') def build(bld): bld( features='sphinx', sphinx_source='sources', # path to source directory sphinx_options='-a -v', # sphinx-build program additional options sphinx_output_format='man' # output format of sphinx documentation ) """ from waflib.Node import Node from waflib import Utils from waflib.Task import Task from waflib.TaskGen import feature, after_method def configure(cnf): """Check if sphinx-build program is available and loads gnu_dirs tool.""" cnf.find_program('sphinx-build', var='SPHINX_BUILD', mandatory=False) cnf.load('gnu_dirs') @feature('sphinx') def build_sphinx(self): """Builds sphinx sources. """ if not self.env.SPHINX_BUILD: self.bld.fatal('Program SPHINX_BUILD not defined.') if not getattr(self, 'sphinx_source', None): self.bld.fatal('Attribute sphinx_source not defined.') if not isinstance(self.sphinx_source, Node): self.sphinx_source = self.path.find_node(self.sphinx_source) if not self.sphinx_source: self.bld.fatal('Can\'t find sphinx_source: %r' % self.sphinx_source) Utils.def_attrs(self, sphinx_output_format='html') self.env.SPHINX_OUTPUT_FORMAT = self.sphinx_output_format self.env.SPHINX_OPTIONS = getattr(self, 'sphinx_options', []) for source_file in self.sphinx_source.ant_glob('**/*'): self.bld.add_manual_dependency(self.sphinx_source, source_file) sphinx_build_task = self.create_task('SphinxBuildingTask') sphinx_build_task.set_inputs(self.sphinx_source) sphinx_build_task.set_outputs(self.path.get_bld()) # the sphinx-build results are in directory sphinx_output_directory = self.path.get_bld().make_node(self.env.SPHINX_OUTPUT_FORMAT) sphinx_output_directory.mkdir() Utils.def_attrs(self, install_path=get_install_path(self)) self.add_install_files(install_to=self.install_path, install_from=sphinx_output_directory.ant_glob('**/*'), cwd=sphinx_output_directory, relative_trick=True) def get_install_path(tg): if tg.env.SPHINX_OUTPUT_FORMAT == 'man': return tg.env.MANDIR elif tg.env.SPHINX_OUTPUT_FORMAT == 'info': return tg.env.INFODIR else: return tg.env.DOCDIR class SphinxBuildingTask(Task): color = 'BOLD' run_str = '${SPHINX_BUILD} -M ${SPHINX_OUTPUT_FORMAT} ${SRC} ${TGT} ${SPHINX_OPTIONS}' def keyword(self): return 'Compiling (%s)' % self.env.SPHINX_OUTPUT_FORMAT tdb-1.4.2/third_party/waf/waflib/extras/stale.py0000660000000000000000000000437113444661622021602 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: UTF-8 # Thomas Nagy, 2006-2015 (ita) """ Add a pre-build hook to remove build files (declared in the system) that do not have a corresponding target This can be used for example to remove the targets that have changed name without performing a full 'waf clean' Of course, it will only work if there are no dynamically generated nodes/tasks, in which case the method will have to be modified to exclude some folders for example. Make sure to set bld.post_mode = waflib.Build.POST_AT_ONCE """ from waflib import Logs, Build from waflib.Runner import Parallel DYNAMIC_EXT = [] # add your non-cleanable files/extensions here MOC_H_EXTS = '.cpp .cxx .hpp .hxx .h'.split() def can_delete(node): """Imperfect moc cleanup which does not look for a Q_OBJECT macro in the files""" if not node.name.endswith('.moc'): return True base = node.name[:-4] p1 = node.parent.get_src() p2 = node.parent.get_bld() for k in MOC_H_EXTS: h_name = base + k n = p1.search_node(h_name) if n: return False n = p2.search_node(h_name) if n: return False # foo.cpp.moc, foo.h.moc, etc. 
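# (note: base still carries the original extension at this point, so a node named
#  foo.cpp.moc yields base == 'foo.cpp'; the test below then assumes the file
#  belongs to the matching source and keeps it)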
if base.endswith(k): return False return True # recursion over the nodes to find the stale files def stale_rec(node, nodes): if node.abspath() in node.ctx.env[Build.CFG_FILES]: return if getattr(node, 'children', []): for x in node.children.values(): if x.name != "c4che": stale_rec(x, nodes) else: for ext in DYNAMIC_EXT: if node.name.endswith(ext): break else: if not node in nodes: if can_delete(node): Logs.warn('Removing stale file -> %r', node) node.delete() old = Parallel.refill_task_list def refill_task_list(self): iit = old(self) bld = self.bld # execute this operation only once if getattr(self, 'stale_done', False): return iit self.stale_done = True # this does not work in partial builds if bld.targets != '*': return iit # this does not work in dynamic builds if getattr(bld, 'post_mode') == Build.POST_LAZY: return iit # obtain the nodes to use during the build nodes = [] for tasks in bld.groups: for x in tasks: try: nodes.extend(x.outputs) except AttributeError: pass stale_rec(bld.bldnode, nodes) return iit Parallel.refill_task_list = refill_task_list tdb-1.4.2/third_party/waf/waflib/extras/stracedeps.py0000660000000000000000000001000613444661622022617 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2015 (ita) """ Execute tasks through strace to obtain dependencies after the process is run. This scheme is similar to that of the Fabricate script. To use:: def configure(conf): conf.load('stracedeps') WARNING: * This will not work when advanced scanners are needed (qt4/qt5) * The overhead of running 'strace' is significant (56s -> 1m29s) * It will not work on Windows :-) """ import os, re, threading from waflib import Task, Logs, Utils #TRACECALLS = 'trace=access,chdir,clone,creat,execve,exit_group,fork,lstat,lstat64,mkdir,open,rename,stat,stat64,symlink,vfork' TRACECALLS = 'trace=process,file' BANNED = ('/tmp', '/proc', '/sys', '/dev') s_process = r'(?:clone|fork|vfork)\(.*?(?P<npid>\d+)' s_file = r'(?P<call>\w+)\("(?P<path>([^"\\]|\\.)*)"(.*)' re_lines = re.compile(r'^(?P<pid>\d+)\s+(?:(?:%s)|(?:%s))\r*$' % (s_file, s_process), re.IGNORECASE | re.MULTILINE) strace_lock = threading.Lock() def configure(conf): conf.find_program('strace') def task_method(func): # Decorator function to bind/replace methods on the base Task class # # The methods Task.exec_command and Task.sig_implicit_deps already exists and are rarely overridden # we thus expect that we are the only ones doing this try: setattr(Task.Task, 'nostrace_%s' % func.__name__, getattr(Task.Task, func.__name__)) except AttributeError: pass setattr(Task.Task, func.__name__, func) return func @task_method def get_strace_file(self): try: return self.strace_file except AttributeError: pass if self.outputs: ret = self.outputs[0].abspath() + '.strace' else: ret = '%s%s%d%s' % (self.generator.bld.bldnode.abspath(), os.sep, id(self), '.strace') self.strace_file = ret return ret @task_method def get_strace_args(self): return (self.env.STRACE or ['strace']) + ['-e', TRACECALLS, '-f', '-o', self.get_strace_file()] @task_method def exec_command(self, cmd, **kw): bld = self.generator.bld if not 'cwd' in kw: kw['cwd'] = self.get_cwd() args = self.get_strace_args() fname = self.get_strace_file() if isinstance(cmd, list): cmd = args + cmd else: cmd = '%s %s' % (' '.join(args), cmd) try: ret = bld.exec_command(cmd, **kw) finally: if not ret: self.parse_strace_deps(fname, kw['cwd']) return ret @task_method def sig_implicit_deps(self): # bypass the scanner functions return @task_method def parse_strace_deps(self, path, cwd): # uncomment
the following line to disable the dependencies and force a file scan # return try: cnt = Utils.readf(path) finally: try: os.remove(path) except OSError: pass if not isinstance(cwd, str): cwd = cwd.abspath() nodes = [] bld = self.generator.bld try: cache = bld.strace_cache except AttributeError: cache = bld.strace_cache = {} # chdir and relative paths pid_to_cwd = {} global BANNED done = set() for m in re.finditer(re_lines, cnt): # scraping the output of strace pid = m.group('pid') if m.group('npid'): npid = m.group('npid') pid_to_cwd[npid] = pid_to_cwd.get(pid, cwd) continue p = m.group('path').replace('\\"', '"') if p == '.' or m.group().find('= -1 ENOENT') > -1: # just to speed it up a bit continue if not os.path.isabs(p): p = os.path.join(pid_to_cwd.get(pid, cwd), p) call = m.group('call') if call == 'chdir': pid_to_cwd[pid] = p continue if p in done: continue done.add(p) for x in BANNED: if p.startswith(x): break else: if p.endswith('/') or os.path.isdir(p): continue try: node = cache[p] except KeyError: strace_lock.acquire() try: cache[p] = node = bld.root.find_node(p) if not node: continue finally: strace_lock.release() nodes.append(node) # record the dependencies then force the task signature recalculation for next time if Logs.verbose: Logs.debug('deps: real scanner for %r returned %r', self, nodes) bld = self.generator.bld bld.node_deps[self.uid()] = nodes bld.raw_deps[self.uid()] = [] try: del self.cache_sig except AttributeError: pass self.signature() tdb-1.4.2/third_party/waf/waflib/extras/swig.py0000660000000000000000000001415413527011455021436 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: UTF-8 # Petar Forai # Thomas Nagy 2008-2010 (ita) import re from waflib import Task, Logs from waflib.TaskGen import extension, feature, after_method from waflib.Configure import conf from waflib.Tools import c_preproc """ tasks have to be added dynamically: - swig interface files may be created at runtime - the module name may be unknown in advance """ SWIG_EXTS = ['.swig', '.i'] re_module = re.compile(r'%module(?:\s*\(.*\))?\s+(.+)', re.M) re_1 = re.compile(r'^%module.*?\s+([\w]+)\s*?$', re.M) re_2 = re.compile(r'[#%](?:include|import(?:\(module=".*"\))+|python(?:begin|code)) [<"](.*)[">]', re.M) class swig(Task.Task): color = 'BLUE' run_str = '${SWIG} ${SWIGFLAGS} ${SWIGPATH_ST:INCPATHS} ${SWIGDEF_ST:DEFINES} ${SRC}' ext_out = ['.h'] # might produce .h files although it is not mandatory vars = ['SWIG_VERSION', 'SWIGDEPS'] def runnable_status(self): for t in self.run_after: if not t.hasrun: return Task.ASK_LATER if not getattr(self, 'init_outputs', None): self.init_outputs = True if not getattr(self, 'module', None): # search the module name txt = self.inputs[0].read() m = re_module.search(txt) if not m: raise ValueError("could not find the swig module name") self.module = m.group(1) swig_c(self) # add the language-specific output files as nodes # call funs in the dict swig_langs for x in self.env['SWIGFLAGS']: # obtain the language x = x[1:] try: fun = swig_langs[x] except KeyError: pass else: fun(self) return super(swig, self).runnable_status() def scan(self): "scan for swig dependencies, climb the .i files" lst_src = [] seen = [] missing = [] to_see = [self.inputs[0]] while to_see: node = to_see.pop(0) if node in seen: continue seen.append(node) lst_src.append(node) # read the file code = node.read() code = c_preproc.re_nl.sub('', code) code = c_preproc.re_cpp.sub(c_preproc.repl, code) # find .i files and project headers names = re_2.findall(code) for n in names: for d 
in self.generator.includes_nodes + [node.parent]: u = d.find_resource(n) if u: to_see.append(u) break else: missing.append(n) return (lst_src, missing) # provide additional language processing swig_langs = {} def swigf(fun): swig_langs[fun.__name__.replace('swig_', '')] = fun return fun swig.swigf = swigf def swig_c(self): ext = '.swigwrap_%d.c' % self.generator.idx flags = self.env['SWIGFLAGS'] if '-c++' in flags: ext += 'xx' out_node = self.inputs[0].parent.find_or_declare(self.module + ext) if '-c++' in flags: c_tsk = self.generator.cxx_hook(out_node) else: c_tsk = self.generator.c_hook(out_node) c_tsk.set_run_after(self) # transfer weights from swig task to c task if getattr(self, 'weight', None): c_tsk.weight = self.weight if getattr(self, 'tree_weight', None): c_tsk.tree_weight = self.tree_weight try: self.more_tasks.append(c_tsk) except AttributeError: self.more_tasks = [c_tsk] try: ltask = self.generator.link_task except AttributeError: pass else: ltask.set_run_after(c_tsk) # setting input nodes does not declare the build order # because the build already started, but it sets # the dependency to enable rebuilds ltask.inputs.append(c_tsk.outputs[0]) self.outputs.append(out_node) if not '-o' in self.env['SWIGFLAGS']: self.env.append_value('SWIGFLAGS', ['-o', self.outputs[0].abspath()]) @swigf def swig_python(tsk): node = tsk.inputs[0].parent if tsk.outdir: node = tsk.outdir tsk.set_outputs(node.find_or_declare(tsk.module+'.py')) @swigf def swig_ocaml(tsk): node = tsk.inputs[0].parent if tsk.outdir: node = tsk.outdir tsk.set_outputs(node.find_or_declare(tsk.module+'.ml')) tsk.set_outputs(node.find_or_declare(tsk.module+'.mli')) @extension(*SWIG_EXTS) def i_file(self, node): # the task instance tsk = self.create_task('swig') tsk.set_inputs(node) tsk.module = getattr(self, 'swig_module', None) flags = self.to_list(getattr(self, 'swig_flags', [])) tsk.env.append_value('SWIGFLAGS', flags) tsk.outdir = None if '-outdir' in flags: outdir = flags[flags.index('-outdir')+1] outdir = tsk.generator.bld.bldnode.make_node(outdir) outdir.mkdir() tsk.outdir = outdir @feature('c', 'cxx', 'd', 'fc', 'asm') @after_method('apply_link', 'process_source') def enforce_swig_before_link(self): try: link_task = self.link_task except AttributeError: pass else: for x in self.tasks: if x.__class__.__name__ == 'swig': link_task.run_after.add(x) @conf def check_swig_version(conf, minver=None): """ Check if the swig tool is found matching a given minimum version. minver should be a tuple, eg. to check for swig >= 1.3.28 pass (1,3,28) as minver. If successful, SWIG_VERSION is defined as 'MAJOR.MINOR' (eg. '1.3') of the actual swig version found. 
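	A minimal configure sketch (illustrative)::

		def configure(conf):
			conf.load('swig')
			conf.check_swig_version((1, 3, 28))
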
:param minver: minimum version :type minver: tuple of int :return: swig version :rtype: tuple of int """ assert minver is None or isinstance(minver, tuple) swigbin = conf.env['SWIG'] if not swigbin: conf.fatal('could not find the swig executable') # Get swig version string cmd = swigbin + ['-version'] Logs.debug('swig: Running swig command %r', cmd) reg_swig = re.compile(r'SWIG Version\s(.*)', re.M) swig_out = conf.cmd_and_log(cmd) swigver_tuple = tuple([int(s) for s in reg_swig.findall(swig_out)[0].split('.')]) # Compare swig version with the minimum required result = (minver is None) or (swigver_tuple >= minver) if result: # Define useful environment variables swigver = '.'.join([str(x) for x in swigver_tuple[:2]]) conf.env['SWIG_VERSION'] = swigver # Feedback swigver_full = '.'.join(map(str, swigver_tuple[:3])) if minver is None: conf.msg('Checking for swig version', swigver_full) else: minver_str = '.'.join(map(str, minver)) conf.msg('Checking for swig version >= %s' % (minver_str,), swigver_full, color=result and 'GREEN' or 'YELLOW') if not result: conf.fatal('The swig version is too old, expecting %r' % (minver,)) return swigver_tuple def configure(conf): conf.find_program('swig', var='SWIG') conf.env.SWIGPATH_ST = '-I%s' conf.env.SWIGDEF_ST = '-D%s' tdb-1.4.2/third_party/waf/waflib/extras/syms.py0000660000000000000000000000621013527011455021452 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 """ This tool supports the export_symbols_regex to export the symbols in a shared library. by default, all symbols are exported by gcc, and nothing by msvc. to use the tool, do something like: def build(ctx): ctx(features='c cshlib syms', source='a.c b.c', export_symbols_regex='mylib_.*', target='testlib') only the symbols starting with 'mylib_' will be exported. 
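Depending on DEST_BINFMT, the collected symbols are written to an msvc/mingw .def
file, an ELF version script or a mac-o exported-symbols list (see the compile_sym
task below). A hypothetical variation overriding the name of the generated symbol
file (the sym_file attribute is read by do_the_symbol_stuff):

def build(ctx):
	ctx(features='c cshlib syms', source='a.c b.c',
		export_symbols_regex='mylib_.*', sym_file='mylib.def', target='testlib')
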
""" import re from waflib.Context import STDOUT from waflib.Task import Task from waflib.Errors import WafError from waflib.TaskGen import feature, after_method class gen_sym(Task): def run(self): obj = self.inputs[0] kw = {} reg = getattr(self.generator, 'export_symbols_regex', '.+?') if 'msvc' in (self.env.CC_NAME, self.env.CXX_NAME): re_nm = re.compile(r'External\s+\|\s+_(?P%s)\b' % reg) cmd = (self.env.DUMPBIN or ['dumpbin']) + ['/symbols', obj.abspath()] else: if self.env.DEST_BINFMT == 'pe': #gcc uses nm, and has a preceding _ on windows re_nm = re.compile(r'(T|D)\s+_(?P%s)\b' % reg) elif self.env.DEST_BINFMT=='mac-o': re_nm=re.compile(r'(T|D)\s+(?P_?(%s))\b' % reg) else: re_nm = re.compile(r'(T|D)\s+(?P%s)\b' % reg) cmd = (self.env.NM or ['nm']) + ['-g', obj.abspath()] syms = [m.group('symbol') for m in re_nm.finditer(self.generator.bld.cmd_and_log(cmd, quiet=STDOUT, **kw))] self.outputs[0].write('%r' % syms) class compile_sym(Task): def run(self): syms = {} for x in self.inputs: slist = eval(x.read()) for s in slist: syms[s] = 1 lsyms = list(syms.keys()) lsyms.sort() if self.env.DEST_BINFMT == 'pe': self.outputs[0].write('EXPORTS\n' + '\n'.join(lsyms)) elif self.env.DEST_BINFMT == 'elf': self.outputs[0].write('{ global:\n' + ';\n'.join(lsyms) + ";\nlocal: *; };\n") elif self.env.DEST_BINFMT=='mac-o': self.outputs[0].write('\n'.join(lsyms) + '\n') else: raise WafError('NotImplemented') @feature('syms') @after_method('process_source', 'process_use', 'apply_link', 'process_uselib_local', 'propagate_uselib_vars') def do_the_symbol_stuff(self): def_node = self.path.find_or_declare(getattr(self, 'sym_file', self.target + '.def')) compiled_tasks = getattr(self, 'compiled_tasks', None) if compiled_tasks: ins = [x.outputs[0] for x in compiled_tasks] self.gen_sym_tasks = [self.create_task('gen_sym', x, x.change_ext('.%d.sym' % self.idx)) for x in ins] self.create_task('compile_sym', [x.outputs[0] for x in self.gen_sym_tasks], def_node) link_task = getattr(self, 'link_task', None) if link_task: self.link_task.dep_nodes.append(def_node) if 'msvc' in (self.env.CC_NAME, self.env.CXX_NAME): self.link_task.env.append_value('LINKFLAGS', ['/def:' + def_node.bldpath()]) elif self.env.DEST_BINFMT == 'pe': # gcc on windows takes *.def as an additional input self.link_task.inputs.append(def_node) elif self.env.DEST_BINFMT == 'elf': self.link_task.env.append_value('LINKFLAGS', ['-Wl,-version-script', '-Wl,' + def_node.bldpath()]) elif self.env.DEST_BINFMT=='mac-o': self.link_task.env.append_value('LINKFLAGS',['-Wl,-exported_symbols_list,' + def_node.bldpath()]) else: raise WafError('NotImplemented') tdb-1.4.2/third_party/waf/waflib/extras/ticgt.py0000660000000000000000000002240213444661622021577 0ustar rootroot00000000000000#!/usr/bin/env python # encoding: utf-8 # Texas Instruments code generator support (experimental) # When reporting issues, please directly assign the bug to the maintainer. __author__ = __maintainer__ = "Jérôme Carretero " __copyright__ = "Jérôme Carretero, 2012" """ TI cgt6x is a compiler suite for TI DSPs. The toolchain does pretty weird things, and I'm sure I'm missing some of them. But still, the tool saves time. 
What this tool does is: - create a TI compiler environment - create TI compiler features, to handle some specifics about this compiler It has a few idiosyncrasies, such as not giving the liberty of the .o file names - automatically activate them when using the TI compiler - handle the tconf tool The tool TODO: - the set_platform_flags() function is not nice - more tests - broaden tool scope, if needed """ import os, re from waflib import Options, Utils, Task, TaskGen from waflib.Tools import c, ccroot, c_preproc from waflib.Configure import conf from waflib.TaskGen import feature, before_method from waflib.Tools.c import cprogram opj = os.path.join @conf def find_ticc(conf): conf.find_program(['cl6x'], var='CC', path_list=opj(getattr(Options.options, 'ti-cgt-dir', ""), 'bin')) conf.env.CC_NAME = 'ticc' @conf def find_tild(conf): conf.find_program(['lnk6x'], var='LINK_CC', path_list=opj(getattr(Options.options, 'ti-cgt-dir', ""), 'bin')) conf.env.LINK_CC_NAME = 'tild' @conf def find_tiar(conf): conf.find_program(['ar6x'], var='AR', path_list=opj(getattr(Options.options, 'ti-cgt-dir', ""), 'bin')) conf.env.AR_NAME = 'tiar' conf.env.ARFLAGS = 'qru' @conf def ticc_common_flags(conf): v = conf.env if not v['LINK_CC']: v['LINK_CC'] = v['CC'] v['CCLNK_SRC_F'] = [] v['CCLNK_TGT_F'] = ['-o'] v['CPPPATH_ST'] = '-I%s' v['DEFINES_ST'] = '-d%s' v['LIB_ST'] = '-l%s' # template for adding libs v['LIBPATH_ST'] = '-i%s' # template for adding libpaths v['STLIB_ST'] = '-l=%s.lib' v['STLIBPATH_ST'] = '-i%s' # program v['cprogram_PATTERN'] = '%s.out' # static lib #v['LINKFLAGS_cstlib'] = ['-Wl,-Bstatic'] v['cstlib_PATTERN'] = '%s.lib' def configure(conf): v = conf.env v.TI_CGT_DIR = getattr(Options.options, 'ti-cgt-dir', "") v.TI_DSPLINK_DIR = getattr(Options.options, 'ti-dsplink-dir', "") v.TI_BIOSUTILS_DIR = getattr(Options.options, 'ti-biosutils-dir', "") v.TI_DSPBIOS_DIR = getattr(Options.options, 'ti-dspbios-dir', "") v.TI_XDCTOOLS_DIR = getattr(Options.options, 'ti-xdctools-dir', "") conf.find_ticc() conf.find_tiar() conf.find_tild() conf.ticc_common_flags() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags() conf.find_program(['tconf'], var='TCONF', path_list=v.TI_XDCTOOLS_DIR) conf.env.TCONF_INCLUDES += [ opj(conf.env.TI_DSPBIOS_DIR, 'packages'), ] conf.env.INCLUDES += [ opj(conf.env.TI_CGT_DIR, 'include'), ] conf.env.LIBPATH += [ opj(conf.env.TI_CGT_DIR, "lib"), ] conf.env.INCLUDES_DSPBIOS += [ opj(conf.env.TI_DSPBIOS_DIR, 'packages', 'ti', 'bios', 'include'), ] conf.env.LIBPATH_DSPBIOS += [ opj(conf.env.TI_DSPBIOS_DIR, 'packages', 'ti', 'bios', 'lib'), ] conf.env.INCLUDES_DSPLINK += [ opj(conf.env.TI_DSPLINK_DIR, 'dsplink', 'dsp', 'inc'), ] @conf def ti_set_debug(cfg, debug=1): """ Sets debug flags for the compiler. TODO: - for each TI CFLAG/INCLUDES/LINKFLAGS/LIBPATH replace RELEASE by DEBUG - -g --no_compress """ if debug: cfg.env.CFLAGS += "-d_DEBUG -dDEBUG -dDDSP_DEBUG".split() @conf def ti_dsplink_set_platform_flags(cfg, splat, dsp, dspbios_ver, board): """ Sets the INCLUDES, LINKFLAGS for DSPLINK and TCONF_INCLUDES for the specific hardware. Assumes that DSPLINK was built in its own folder. :param splat: short platform name (eg. OMAPL138) :param dsp: DSP name (eg. 674X) :param dspbios_ver: string identifying DspBios version (eg. 5.XX) :param board: board name (eg.
OMAPL138GEM) """ d1 = opj(cfg.env.TI_DSPLINK_DIR, 'dsplink', 'dsp', 'inc', 'DspBios', dspbios_ver) d = opj(cfg.env.TI_DSPLINK_DIR, 'dsplink', 'dsp', 'inc', 'DspBios', dspbios_ver, board) cfg.env.TCONF_INCLUDES += [d1, d] cfg.env.INCLUDES_DSPLINK += [ opj(cfg.env.TI_DSPLINK_DIR, 'dsplink', 'dsp', 'inc', dsp), d, ] cfg.env.LINKFLAGS_DSPLINK += [ opj(cfg.env.TI_DSPLINK_DIR, 'dsplink', 'dsp', 'export', 'BIN', 'DspBios', splat, board+'_0', 'RELEASE', 'dsplink%s.lib' % x) for x in ('', 'pool', 'mpcs', 'mplist', 'msg', 'data', 'notify', 'ringio') ] def options(opt): opt.add_option('--with-ti-cgt', type='string', dest='ti-cgt-dir', help = 'Specify alternate cgt root folder', default="") opt.add_option('--with-ti-biosutils', type='string', dest='ti-biosutils-dir', help = 'Specify alternate biosutils folder', default="") opt.add_option('--with-ti-dspbios', type='string', dest='ti-dspbios-dir', help = 'Specify alternate dspbios folder', default="") opt.add_option('--with-ti-dsplink', type='string', dest='ti-dsplink-dir', help = 'Specify alternate dsplink folder', default="") opt.add_option('--with-ti-xdctools', type='string', dest='ti-xdctools-dir', help = 'Specify alternate xdctools folder', default="") class ti_cprogram(cprogram): """ Link object files into a c program Changes: - the linked executable to have a relative path (because we can) - put the LIBPATH first """ run_str = '${LINK_CC} ${LIBPATH_ST:LIBPATH} ${LIB_ST:LIB} ${LINKFLAGS} ${CCLNK_SRC_F}${SRC} ${CCLNK_TGT_F}${TGT[0].bldpath()} ${RPATH_ST:RPATH} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${FRAMEWORK_ST:FRAMEWORK} ${ARCH_ST:ARCH} ${STLIB_MARKER} ${STLIBPATH_ST:STLIBPATH} ${STLIB_ST:STLIB} ${SHLIB_MARKER} ' @feature("c") @before_method('apply_link') def use_ti_cprogram(self): """ Automatically uses ti_cprogram link process """ if 'cprogram' in self.features and self.env.CC_NAME == 'ticc': self.features.insert(0, "ti_cprogram") class ti_c(Task.Task): """ Compile task for the TI codegen compiler This compiler does not allow specifying the output file name, only the output path. 
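	create_compiled_task() below works around this: it declares the .obj node itself
	and sets env.OUT to '-fr<build directory>', so the node known to waf and the file
	actually written by cl6x stay in sync.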
""" "Compile C files into object files" run_str = '${CC} ${ARCH_ST:ARCH} ${CFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${SRC} -c ${OUT} ${CPPFLAGS}' vars = ['CCDEPS'] # unused variable to depend on, just in case ext_in = ['.h'] # set the build order easily by using ext_out=['.h'] scan = c_preproc.scan def create_compiled_task(self, name, node): """ Overrides ccroot.create_compiled_task to support ti_c """ out = '%s' % (node.change_ext('.obj').name) if self.env.CC_NAME == 'ticc': name = 'ti_c' task = self.create_task(name, node, node.parent.find_or_declare(out)) self.env.OUT = '-fr%s' % (node.parent.get_bld().abspath()) try: self.compiled_tasks.append(task) except AttributeError: self.compiled_tasks = [task] return task @TaskGen.extension('.c') def c_hook(self, node): "Bind the c file extension to the creation of a :py:class:`waflib.Tools.c.c` instance" if self.env.CC_NAME == 'ticc': return create_compiled_task(self, 'ti_c', node) else: return self.create_compiled_task('c', node) @feature("ti-tconf") @before_method('process_source') def apply_tconf(self): sources = [x.get_src() for x in self.to_nodes(self.source, path=self.path.get_src())] node = sources[0] assert(sources[0].name.endswith(".tcf")) if len(sources) > 1: assert(sources[1].name.endswith(".cmd")) target = getattr(self, 'target', self.source) target_node = node.get_bld().parent.find_or_declare(node.name) procid = "%d" % int(getattr(self, 'procid', 0)) importpaths = [] includes = Utils.to_list(getattr(self, 'includes', [])) for x in includes + self.env.TCONF_INCLUDES: if x == os.path.abspath(x): importpaths.append(x) else: relpath = self.path.find_node(x).path_from(target_node.parent) importpaths.append(relpath) task = self.create_task('ti_tconf', sources, target_node.change_ext('.cdb')) task.path = self.path task.includes = includes task.cwd = target_node.parent.abspath() task.env = self.env.derive() task.env["TCONFSRC"] = node.path_from(target_node.parent) task.env["TCONFINC"] = '-Dconfig.importPath=%s' % ";".join(importpaths) task.env['TCONFPROGNAME'] = '-Dconfig.programName=%s' % target task.env['PROCID'] = procid task.outputs = [ target_node.change_ext("cfg_c.c"), target_node.change_ext("cfg.s62"), target_node.change_ext("cfg.cmd"), ] create_compiled_task(self, 'ti_c', task.outputs[1]) ctask = create_compiled_task(self, 'ti_c', task.outputs[0]) ctask.env = self.env.derive() self.add_those_o_files(target_node.change_ext("cfg.cmd")) if len(sources) > 1: self.add_those_o_files(sources[1]) self.source = [] re_tconf_include = re.compile(r'(?Putils\.importFile)\("(?P.*)"\)',re.M) class ti_tconf(Task.Task): run_str = '${TCONF} ${TCONFINC} ${TCONFPROGNAME} ${TCONFSRC} ${PROCID}' color = 'PINK' def scan(self): includes = Utils.to_list(getattr(self, 'includes', [])) def deps(node): nodes, names = [], [] if node: code = Utils.readf(node.abspath()) for match in re_tconf_include.finditer(code): path = match.group('file') if path: for x in includes: filename = opj(x, path) fi = self.path.find_resource(filename) if fi: subnodes, subnames = deps(fi) nodes += subnodes names += subnames nodes.append(fi) names.append(path) break return nodes, names return deps(self.inputs[0]) tdb-1.4.2/third_party/waf/waflib/extras/unity.py0000660000000000000000000000547013444661622021643 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 """ Compile whole groups of C/C++ files at once (C and C++ files are processed independently though). 
To enable globally:: def options(opt): opt.load('compiler_cxx') def build(bld): bld.load('compiler_cxx unity') To enable for specific task generators only:: def build(bld): bld(features='c cprogram unity', source='main.c', ...) The file order is often significant in such builds, so it can be necessary to adjust the order of source files and the batch sizes. To control the amount of files processed in a batch per target (the default is 50):: def build(bld): bld(features='c cprogram', unity_size=20) """ from waflib import Task, Options from waflib.Tools import c_preproc from waflib import TaskGen MAX_BATCH = 50 EXTS_C = ('.c',) EXTS_CXX = ('.cpp','.cc','.cxx','.C','.c++') def options(opt): global MAX_BATCH opt.add_option('--batchsize', action='store', dest='batchsize', type='int', default=MAX_BATCH, help='default unity batch size (0 disables unity builds)') @TaskGen.taskgen_method def batch_size(self): default = getattr(Options.options, 'batchsize', MAX_BATCH) if default < 1: return 0 return getattr(self, 'unity_size', default) class unity(Task.Task): color = 'BLUE' scan = c_preproc.scan def to_include(self, node): ret = node.path_from(self.outputs[0].parent) ret = ret.replace('\\', '\\\\').replace('"', '\\"') return ret def run(self): lst = ['#include "%s"\n' % self.to_include(node) for node in self.inputs] txt = ''.join(lst) self.outputs[0].write(txt) def __str__(self): node = self.outputs[0] return node.path_from(node.ctx.launch_node()) def bind_unity(obj, cls_name, exts): if not 'mappings' in obj.__dict__: obj.mappings = dict(obj.mappings) for j in exts: fun = obj.mappings[j] if fun.__name__ == 'unity_fun': raise ValueError('Attempt to bind unity mappings multiple times %r' % j) def unity_fun(self, node): cnt = self.batch_size() if cnt <= 1: return fun(self, node) x = getattr(self, 'master_%s' % cls_name, None) if not x or len(x.inputs) >= cnt: x = self.create_task('unity') setattr(self, 'master_%s' % cls_name, x) cnt_cur = getattr(self, 'cnt_%s' % cls_name, 0) c_node = node.parent.find_or_declare('unity_%s_%d_%d.%s' % (self.idx, cnt_cur, cnt, cls_name)) x.outputs = [c_node] setattr(self, 'cnt_%s' % cls_name, cnt_cur + 1) fun(self, c_node) x.inputs.append(node) obj.mappings[j] = unity_fun @TaskGen.feature('unity') @TaskGen.before('process_source') def single_unity(self): lst = self.to_list(self.features) if 'c' in lst: bind_unity(self, 'c', EXTS_C) if 'cxx' in lst: bind_unity(self, 'cxx', EXTS_CXX) def build(bld): if bld.env.CC_NAME: bind_unity(TaskGen.task_gen, 'c', EXTS_C) if bld.env.CXX_NAME: bind_unity(TaskGen.task_gen, 'cxx', EXTS_CXX) tdb-1.4.2/third_party/waf/waflib/extras/use_config.py0000660000000000000000000001303113527011455022577 0ustar rootroot00000000000000#!/usr/bin/env python # coding=utf-8 # Mathieu Courtois - EDF R&D, 2013 - http://www.code-aster.org """ When a project has a lot of options the 'waf configure' command line can be very long and it becomes a cause of error. This tool provides a convenient way to load a set of configuration parameters from a local file or from a remote url. The configuration parameters are stored in a Python file that is imported as an extra waf tool can be. Example: $ waf configure --use-config-dir=http://www.anywhere.org --use-config=myconf1 ... The file 'myconf1' will be downloaded from 'http://www.anywhere.org' (or 'http://www.anywhere.org/wafcfg'). If the files are available locally, it could be: $ waf configure --use-config-dir=/somewhere/myconfigurations --use-config=myconf1 ... 
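Several configuration modules can be combined by passing a comma-separated list, e.g. --use-config=myconf1,myconf2; each module is loaded in turn.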
The configuration of 'myconf1.py' is automatically loaded by calling its 'configure' function. In this example, it defines environment variables and set options: def configure(self): self.env['CC'] = 'gcc-4.8' self.env.append_value('LIBPATH', [...]) self.options.perlbinary = '/usr/local/bin/perl' self.options.pyc = False The corresponding command line should have been: $ CC=gcc-4.8 LIBPATH=... waf configure --nopyc --with-perl-binary=/usr/local/bin/perl This is an extra tool, not bundled with the default waf binary. To add the use_config tool to the waf file: $ ./waf-light --tools=use_config When using this tool, the wscript will look like: def options(opt): opt.load('use_config') def configure(conf): conf.load('use_config') """ import sys import os.path as osp import os local_repo = '' """Local repository containing additional Waf tools (plugins)""" remote_repo = 'https://gitlab.com/ita1024/waf/raw/master/' """ Remote directory containing downloadable waf tools. The missing tools can be downloaded by using:: $ waf configure --download """ remote_locs = ['waflib/extras', 'waflib/Tools'] """ Remote directories for use with :py:const:`waflib.extras.use_config.remote_repo` """ try: from urllib import request except ImportError: from urllib import urlopen else: urlopen = request.urlopen from waflib import Errors, Context, Logs, Utils, Options, Configure try: from urllib.parse import urlparse except ImportError: from urlparse import urlparse DEFAULT_DIR = 'wafcfg' # add first the current wafcfg subdirectory sys.path.append(osp.abspath(DEFAULT_DIR)) def options(self): group = self.add_option_group('configure options') group.add_option('--download', dest='download', default=False, action='store_true', help='try to download the tools if missing') group.add_option('--use-config', action='store', default=None, metavar='CFG', dest='use_config', help='force the configuration parameters by importing ' 'CFG.py. Several modules may be provided (comma ' 'separated).') group.add_option('--use-config-dir', action='store', default=DEFAULT_DIR, metavar='CFG_DIR', dest='use_config_dir', help='path or url where to find the configuration file') def download_check(node): """ Hook to check for the tools which are downloaded. Replace with your function if necessary. 
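	A hypothetical strict replacement (a sketch; the EXPECTED allow-list is an
	assumption, not part of this tool)::

		import hashlib
		from waflib.extras import use_config

		EXPECTED = set()  # hypothetical sha256 allow-list, filled by the project

		def strict_check(node):
			# node is the waf Node of the freshly downloaded tool
			if hashlib.sha256(node.read(flags='rb')).hexdigest() not in EXPECTED:
				raise ValueError('untrusted download: %r' % node)

		use_config.download_check = strict_check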
""" pass def download_tool(tool, force=False, ctx=None): """ Download a Waf tool from the remote repository defined in :py:const:`waflib.extras.use_config.remote_repo`:: $ waf configure --download """ for x in Utils.to_list(remote_repo): for sub in Utils.to_list(remote_locs): url = '/'.join((x, sub, tool + '.py')) try: web = urlopen(url) try: if web.getcode() != 200: continue except AttributeError: pass except Exception: # on python3 urlopen throws an exception # python 2.3 does not have getcode and throws an exception to fail continue else: tmp = ctx.root.make_node(os.sep.join((Context.waf_dir, 'waflib', 'extras', tool + '.py'))) tmp.write(web.read(), 'wb') Logs.warn('Downloaded %s from %s', tool, url) download_check(tmp) try: module = Context.load_tool(tool) except Exception: Logs.warn('The tool %s from %s is unusable', tool, url) try: tmp.delete() except Exception: pass continue return module raise Errors.WafError('Could not load the Waf tool') def load_tool(tool, tooldir=None, ctx=None, with_sys_path=True): try: module = Context.load_tool_default(tool, tooldir, ctx, with_sys_path) except ImportError as e: if not ctx or not hasattr(Options.options, 'download'): Logs.error('Could not load %r during options phase (download unavailable at this point)' % tool) raise if Options.options.download: module = download_tool(tool, ctx=ctx) if not module: ctx.fatal('Could not load the Waf tool %r or download a suitable replacement from the repository (sys.path %r)\n%s' % (tool, sys.path, e)) else: ctx.fatal('Could not load the Waf tool %r from %r (try the --download option?):\n%s' % (tool, sys.path, e)) return module Context.load_tool_default = Context.load_tool Context.load_tool = load_tool Configure.download_tool = download_tool def configure(self): opts = self.options use_cfg = opts.use_config if use_cfg is None: return url = urlparse(opts.use_config_dir) kwargs = {} if url.scheme: kwargs['download'] = True kwargs['remote_url'] = url.geturl() # search first with the exact url, else try with +'/wafcfg' kwargs['remote_locs'] = ['', DEFAULT_DIR] tooldir = url.geturl() + ' ' + DEFAULT_DIR for cfg in use_cfg.split(','): Logs.pprint('NORMAL', "Searching configuration '%s'..." % cfg) self.load(cfg, tooldir=tooldir, **kwargs) self.start_msg('Checking for configuration') self.end_msg(use_cfg) tdb-1.4.2/third_party/waf/waflib/extras/valadoc.py0000660000000000000000000001053513444661622022102 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: UTF-8 # Nicolas Joseph 2009 """ ported from waf 1.5: TODO: tabs vs spaces """ from waflib import Task, Utils, Errors, Logs from waflib.TaskGen import feature VALADOC_STR = '${VALADOC}' class valadoc(Task.Task): vars = ['VALADOC', 'VALADOCFLAGS'] color = 'BLUE' after = ['cprogram', 'cstlib', 'cshlib', 'cxxprogram', 'cxxstlib', 'cxxshlib'] quiet = True # no outputs .. 
this is weird def __init__(self, *k, **kw): Task.Task.__init__(self, *k, **kw) self.output_dir = '' self.doclet = '' self.package_name = '' self.package_version = '' self.files = [] self.vapi_dirs = [] self.protected = True self.private = False self.inherit = False self.deps = False self.vala_defines = [] self.vala_target_glib = None self.enable_non_null_experimental = False self.force = False def run(self): if not self.env['VALADOCFLAGS']: self.env['VALADOCFLAGS'] = '' cmd = [Utils.subst_vars(VALADOC_STR, self.env)] cmd.append ('-o %s' % self.output_dir) if getattr(self, 'doclet', None): cmd.append ('--doclet %s' % self.doclet) cmd.append ('--package-name %s' % self.package_name) if getattr(self, 'package_version', None): cmd.append ('--package-version %s' % self.package_version) if getattr(self, 'packages', None): for package in self.packages: cmd.append ('--pkg %s' % package) if getattr(self, 'vapi_dirs', None): for vapi_dir in self.vapi_dirs: cmd.append ('--vapidir %s' % vapi_dir) if not getattr(self, 'protected', None): cmd.append ('--no-protected') if getattr(self, 'private', None): cmd.append ('--private') if getattr(self, 'inherit', None): cmd.append ('--inherit') if getattr(self, 'deps', None): cmd.append ('--deps') if getattr(self, 'vala_defines', None): for define in self.vala_defines: cmd.append ('--define %s' % define) if getattr(self, 'vala_target_glib', None): cmd.append ('--target-glib=%s' % self.vala_target_glib) if getattr(self, 'enable_non_null_experimental', None): cmd.append ('--enable-non-null-experimental') if getattr(self, 'force', None): cmd.append ('--force') cmd.append (' '.join ([x.abspath() for x in self.files])) return self.generator.bld.exec_command(' '.join(cmd)) @feature('valadoc') def process_valadoc(self): """ Generate API documentation from Vala source code with valadoc doc = bld( features = 'valadoc', output_dir = '../doc/html', package_name = 'vala-gtk-example', package_version = '1.0.0', packages = 'gtk+-2.0', vapi_dirs = '../vapi', force = True ) path = bld.path.find_dir ('../src') doc.files = path.ant_glob (incl='**/*.vala') """ task = self.create_task('valadoc') if getattr(self, 'output_dir', None): task.output_dir = self.path.find_or_declare(self.output_dir).abspath() else: raise Errors.WafError('no output directory') if getattr(self, 'doclet', None): task.doclet = self.doclet else: raise Errors.WafError('no doclet directory') if getattr(self, 'package_name', None): task.package_name = self.package_name else: raise Errors.WafError('no package name') if getattr(self, 'package_version', None): task.package_version = self.package_version if getattr(self, 'packages', None): task.packages = Utils.to_list(self.packages) if getattr(self, 'vapi_dirs', None): vapi_dirs = Utils.to_list(self.vapi_dirs) for vapi_dir in vapi_dirs: try: task.vapi_dirs.append(self.path.find_dir(vapi_dir).abspath()) except AttributeError: Logs.warn('Unable to locate Vala API directory: %r', vapi_dir) if getattr(self, 'files', None): task.files = self.files else: raise Errors.WafError('no input file') if getattr(self, 'protected', None): task.protected = self.protected if getattr(self, 'private', None): task.private = self.private if getattr(self, 'inherit', None): task.inherit = self.inherit if getattr(self, 'deps', None): task.deps = self.deps if getattr(self, 'vala_defines', None): task.vala_defines = Utils.to_list(self.vala_defines) if getattr(self, 'vala_target_glib', None): task.vala_target_glib = self.vala_target_glib if getattr(self, 'enable_non_null_experimental', None):
task.enable_non_null_experimental = self.enable_non_null_experimental if getattr(self, 'force', None): task.force = self.force def configure(conf): conf.find_program('valadoc', errmsg='You must install valadoc for generate the API documentation') tdb-1.4.2/third_party/waf/waflib/extras/waf_xattr.py0000660000000000000000000001006013444661622022461 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 """ Use extended attributes instead of database files 1. Input files will be made writable 2. This is only for systems providing extended filesystem attributes 3. By default, hashes are calculated only if timestamp/size change (HASH_CACHE below) 4. The module enables "deep_inputs" on all tasks by propagating task signatures 5. This module also skips task signature comparisons for task code changes due to point 4. 6. This module is for Python3/Linux only, but it could be extended to Python2/other systems using the xattr library 7. For projects in which tasks always declare output files, it should be possible to store the rest of build context attributes on output files (imp_sigs, raw_deps and node_deps) but this is not done here On a simple C++ project benchmark, the variations before and after adding waf_xattr.py were observed: total build time: 20s -> 22s no-op build time: 2.4s -> 1.8s pickle file size: 2.9MB -> 2.6MB """ import os from waflib import Logs, Node, Task, Utils, Errors from waflib.Task import SKIP_ME, RUN_ME, CANCEL_ME, ASK_LATER, SKIPPED, MISSING HASH_CACHE = True SIG_VAR = 'user.waf.sig' SEP = ','.encode() TEMPLATE = '%b%d,%d'.encode() try: PermissionError except NameError: PermissionError = IOError def getxattr(self): return os.getxattr(self.abspath(), SIG_VAR) def setxattr(self, val): os.setxattr(self.abspath(), SIG_VAR, val) def h_file(self): try: ret = getxattr(self) except OSError: if HASH_CACHE: st = os.stat(self.abspath()) mtime = st.st_mtime size = st.st_size else: if len(ret) == 16: # for build directory files return ret if HASH_CACHE: # check if timestamp and mtime match to avoid re-hashing st = os.stat(self.abspath()) mtime, size = ret[16:].split(SEP) if int(1000 * st.st_mtime) == int(mtime) and st.st_size == int(size): return ret[:16] ret = Utils.h_file(self.abspath()) if HASH_CACHE: val = TEMPLATE % (ret, int(1000 * st.st_mtime), int(st.st_size)) try: setxattr(self, val) except PermissionError: os.chmod(self.abspath(), st.st_mode | 128) setxattr(self, val) return ret def runnable_status(self): bld = self.generator.bld if bld.is_install < 0: return SKIP_ME for t in self.run_after: if not t.hasrun: return ASK_LATER elif t.hasrun < SKIPPED: # a dependency has an error return CANCEL_ME # first compute the signature try: new_sig = self.signature() except Errors.TaskNotReady: return ASK_LATER if not self.outputs: # compare the signature to a signature computed previously # this part is only for tasks with no output files key = self.uid() try: prev_sig = bld.task_sigs[key] except KeyError: Logs.debug('task: task %r must run: it was never run before or the task code changed', self) return RUN_ME if new_sig != prev_sig: Logs.debug('task: task %r must run: the task signature changed', self) return RUN_ME # compare the signatures of the outputs to make a decision for node in self.outputs: try: sig = node.h_file() except EnvironmentError: Logs.debug('task: task %r must run: an output node does not exist', self) return RUN_ME if sig != new_sig: Logs.debug('task: task %r must run: an output node is stale', self) return RUN_ME return (self.always_run and RUN_ME) or 
SKIP_ME def post_run(self): bld = self.generator.bld sig = self.signature() for node in self.outputs: if not node.exists(): self.hasrun = MISSING self.err_msg = '-> missing file: %r' % node.abspath() raise Errors.WafError(self.err_msg) os.setxattr(node.abspath(), 'user.waf.sig', sig) if not self.outputs: # only for task with no outputs bld.task_sigs[self.uid()] = sig if not self.keep_last_cmd: try: del self.last_cmd except AttributeError: pass try: os.getxattr except AttributeError: pass else: h_file.__doc__ = Node.Node.h_file.__doc__ # keep file hashes as file attributes Node.Node.h_file = h_file # enable "deep_inputs" on all tasks Task.Task.runnable_status = runnable_status Task.Task.post_run = post_run Task.Task.sig_deep_inputs = Utils.nada tdb-1.4.2/third_party/waf/waflib/extras/why.py0000660000000000000000000000354313444661622021301 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2010 (ita) """ This tool modifies the task signature scheme to store and obtain information about the task execution (why it must run, etc):: def configure(conf): conf.load('why') After adding the tool, a full rebuild is necessary: waf clean build --zones=task """ from waflib import Task, Utils, Logs, Errors def signature(self): # compute the result one time, and suppose the scan_signature will give the good result try: return self.cache_sig except AttributeError: pass self.m = Utils.md5() self.m.update(self.hcode) id_sig = self.m.digest() # explicit deps self.m = Utils.md5() self.sig_explicit_deps() exp_sig = self.m.digest() # env vars self.m = Utils.md5() self.sig_vars() var_sig = self.m.digest() # implicit deps / scanner results self.m = Utils.md5() if self.scan: try: self.sig_implicit_deps() except Errors.TaskRescan: return self.signature() impl_sig = self.m.digest() ret = self.cache_sig = impl_sig + id_sig + exp_sig + var_sig return ret Task.Task.signature = signature old = Task.Task.runnable_status def runnable_status(self): ret = old(self) if ret == Task.RUN_ME: try: old_sigs = self.generator.bld.task_sigs[self.uid()] except (KeyError, AttributeError): Logs.debug("task: task must run as no previous signature exists") else: new_sigs = self.cache_sig def v(x): return Utils.to_hex(x) Logs.debug('Task %r', self) msgs = ['* Implicit or scanner dependency', '* Task code', '* Source file, explicit or manual dependency', '* Configuration data variable'] tmp = 'task: -> %s: %s %s' for x in range(len(msgs)): l = len(Utils.SIG_NIL) a = new_sigs[x*l : (x+1)*l] b = old_sigs[x*l : (x+1)*l] if (a != b): Logs.debug(tmp, msgs[x].ljust(35), v(a), v(b)) return ret Task.Task.runnable_status = runnable_status tdb-1.4.2/third_party/waf/waflib/extras/win32_opts.py0000660000000000000000000001114413444661622022475 0ustar rootroot00000000000000#! /usr/bin/env python # encoding: utf-8 """ Windows-specific optimizations This module can help reducing the overhead of listing files on windows (more than 10000 files). Python 3.5 already provides the listdir optimization though. 
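Usage sketch (an assumption rather than documented behaviour: merely loading the
tool is enough, since it patches the Node class at import time and only acts when
Utils.is_win32 is set)::

	def options(opt):
		opt.load('win32_opts')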
""" import os from waflib import Utils, Build, Node, Logs try: TP = '%s\\*'.decode('ascii') except AttributeError: TP = '%s\\*' if Utils.is_win32: from waflib.Tools import md5_tstamp import ctypes, ctypes.wintypes FindFirstFile = ctypes.windll.kernel32.FindFirstFileW FindNextFile = ctypes.windll.kernel32.FindNextFileW FindClose = ctypes.windll.kernel32.FindClose FILE_ATTRIBUTE_DIRECTORY = 0x10 INVALID_HANDLE_VALUE = -1 UPPER_FOLDERS = ('.', '..') try: UPPER_FOLDERS = [unicode(x) for x in UPPER_FOLDERS] except NameError: pass def cached_hash_file(self): try: cache = self.ctx.cache_listdir_cache_hash_file except AttributeError: cache = self.ctx.cache_listdir_cache_hash_file = {} if id(self.parent) in cache: try: t = cache[id(self.parent)][self.name] except KeyError: raise IOError('Not a file') else: # an opportunity to list the files and the timestamps at once findData = ctypes.wintypes.WIN32_FIND_DATAW() find = FindFirstFile(TP % self.parent.abspath(), ctypes.byref(findData)) if find == INVALID_HANDLE_VALUE: cache[id(self.parent)] = {} raise IOError('Not a file') cache[id(self.parent)] = lst_files = {} try: while True: if findData.cFileName not in UPPER_FOLDERS: thatsadir = findData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY if not thatsadir: ts = findData.ftLastWriteTime d = (ts.dwLowDateTime << 32) | ts.dwHighDateTime lst_files[str(findData.cFileName)] = d if not FindNextFile(find, ctypes.byref(findData)): break except Exception: cache[id(self.parent)] = {} raise IOError('Not a file') finally: FindClose(find) t = lst_files[self.name] fname = self.abspath() if fname in Build.hashes_md5_tstamp: if Build.hashes_md5_tstamp[fname][0] == t: return Build.hashes_md5_tstamp[fname][1] try: fd = os.open(fname, os.O_BINARY | os.O_RDONLY | os.O_NOINHERIT) except OSError: raise IOError('Cannot read from %r' % fname) f = os.fdopen(fd, 'rb') m = Utils.md5() rb = 1 try: while rb: rb = f.read(200000) m.update(rb) finally: f.close() # ensure that the cache is overwritten Build.hashes_md5_tstamp[fname] = (t, m.digest()) return m.digest() Node.Node.cached_hash_file = cached_hash_file def get_bld_sig_win32(self): try: return self.ctx.hash_cache[id(self)] except KeyError: pass except AttributeError: self.ctx.hash_cache = {} self.ctx.hash_cache[id(self)] = ret = Utils.h_file(self.abspath()) return ret Node.Node.get_bld_sig = get_bld_sig_win32 def isfile_cached(self): # optimize for nt.stat calls, assuming there are many files for few folders try: cache = self.__class__.cache_isfile_cache except AttributeError: cache = self.__class__.cache_isfile_cache = {} try: c1 = cache[id(self.parent)] except KeyError: c1 = cache[id(self.parent)] = [] curpath = self.parent.abspath() findData = ctypes.wintypes.WIN32_FIND_DATAW() find = FindFirstFile(TP % curpath, ctypes.byref(findData)) if find == INVALID_HANDLE_VALUE: Logs.error("invalid win32 handle isfile_cached %r", self.abspath()) return os.path.isfile(self.abspath()) try: while True: if findData.cFileName not in UPPER_FOLDERS: thatsadir = findData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY if not thatsadir: c1.append(str(findData.cFileName)) if not FindNextFile(find, ctypes.byref(findData)): break except Exception as e: Logs.error('exception while listing a folder %r %r', self.abspath(), e) return os.path.isfile(self.abspath()) finally: FindClose(find) return self.name in c1 Node.Node.isfile_cached = isfile_cached def find_or_declare_win32(self, lst): # assuming that "find_or_declare" is called before the build starts, remove the calls to os.path.isfile if 
isinstance(lst, str): lst = [x for x in Utils.split_path(lst) if x and x != '.'] node = self.get_bld().search_node(lst) if node: if not node.isfile_cached(): try: node.parent.mkdir() except OSError: pass return node self = self.get_src() node = self.find_node(lst) if node: if not node.isfile_cached(): try: node.parent.mkdir() except OSError: pass return node node = self.get_bld().make_node(lst) node.parent.mkdir() return node Node.Node.find_or_declare = find_or_declare_win32 tdb-1.4.2/third_party/waf/waflib/extras/wix.py0000660000000000000000000000514313444661622021277 0ustar rootroot00000000000000#!/usr/bin/python # encoding: utf-8 # vim: tabstop=4 noexpandtab """ Windows Installer XML Tool (WiX) .wxs --- candle ---> .wxobj --- light ---> .msi bld(features='wix', some.wxs, gen='some.msi', candleflags=[..], lightflags=[..]) bld(features='wix', source=['bundle.wxs','WixBalExtension'], gen='setup.exe', candleflags=[..]) """ import os, copy from waflib import TaskGen from waflib import Task from waflib.Utils import winreg class candle(Task.Task): run_str = '${CANDLE} -nologo ${CANDLEFLAGS} -out ${TGT} ${SRC[0].abspath()}', class light(Task.Task): run_str = "${LIGHT} -nologo -b ${SRC[0].parent.abspath()} ${LIGHTFLAGS} -out ${TGT} ${SRC[0].abspath()}" @TaskGen.feature('wix') @TaskGen.before_method('process_source') def wix(self): #X.wxs -> ${SRC} for CANDLE #X.wxobj -> ${SRC} for LIGHT #X.dll -> -ext X in ${LIGHTFLAGS} #X.wxl -> wixui.wixlib -loc X.wxl in ${LIGHTFLAGS} wxobj = [] wxs = [] exts = [] wxl = [] rest = [] for x in self.source: if x.endswith('.wxobj'): wxobj.append(x) elif x.endswith('.wxs'): wxobj.append(self.path.find_or_declare(x[:-4]+'.wxobj')) wxs.append(x) elif x.endswith('.dll'): exts.append(x[:-4]) elif '.' not in x: exts.append(x) elif x.endswith('.wxl'): wxl.append(x) else: rest.append(x) self.source = self.to_nodes(rest) #.wxs cndl = self.create_task('candle', self.to_nodes(wxs), self.to_nodes(wxobj)) lght = self.create_task('light', self.to_nodes(wxobj), self.path.find_or_declare(self.gen)) cndl.env.CANDLEFLAGS = copy.copy(getattr(self,'candleflags',[])) lght.env.LIGHTFLAGS = copy.copy(getattr(self,'lightflags',[])) for x in wxl: lght.env.append_value('LIGHTFLAGS','wixui.wixlib') lght.env.append_value('LIGHTFLAGS','-loc') lght.env.append_value('LIGHTFLAGS',x) for x in exts: cndl.env.append_value('CANDLEFLAGS','-ext') cndl.env.append_value('CANDLEFLAGS',x) lght.env.append_value('LIGHTFLAGS','-ext') lght.env.append_value('LIGHTFLAGS',x) #wix_bin_path() def wix_bin_path(): basekey = r"SOFTWARE\Microsoft\.NETFramework\AssemblyFolders" query = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, basekey) cnt=winreg.QueryInfoKey(query)[0] thiskey = r'C:\Program Files (x86)\WiX Toolset v3.10\SDK' for i in range(cnt-1,-1,-1): thiskey = winreg.EnumKey(query,i) if 'WiX' in thiskey: break winreg.CloseKey(query) return os.path.normpath(winreg.QueryValue(winreg.HKEY_LOCAL_MACHINE, basekey+r'\\'+thiskey)+'..\\bin') def configure(ctx): path_list=[wix_bin_path()] ctx.find_program('candle', var='CANDLE', mandatory=True, path_list = path_list) ctx.find_program('light', var='LIGHT', mandatory=True, path_list = path_list) tdb-1.4.2/third_party/waf/waflib/extras/xcode6.py0000660000000000000000000005711013527011455021654 0ustar rootroot00000000000000#! 
/usr/bin/env python # encoding: utf-8 # XCode 3/XCode 4/XCode 6/Xcode 7 generator for Waf # Based on work by Nicolas Mercier 2011 # Extended by Simon Warg 2015, https://github.com/mimon # XCode project file format based on http://www.monobjc.net/xcode-project-file-format.html """ See playground/xcode6/ for usage examples. """ from waflib import Context, TaskGen, Build, Utils, Errors, Logs import os, sys # FIXME too few extensions XCODE_EXTS = ['.c', '.cpp', '.m', '.mm'] HEADERS_GLOB = '**/(*.h|*.hpp|*.H|*.inl)' MAP_EXT = { '': "folder", '.h' : "sourcecode.c.h", '.hh': "sourcecode.cpp.h", '.inl': "sourcecode.cpp.h", '.hpp': "sourcecode.cpp.h", '.c': "sourcecode.c.c", '.m': "sourcecode.c.objc", '.mm': "sourcecode.cpp.objcpp", '.cc': "sourcecode.cpp.cpp", '.cpp': "sourcecode.cpp.cpp", '.C': "sourcecode.cpp.cpp", '.cxx': "sourcecode.cpp.cpp", '.c++': "sourcecode.cpp.cpp", '.l': "sourcecode.lex", # luthor '.ll': "sourcecode.lex", '.y': "sourcecode.yacc", '.yy': "sourcecode.yacc", '.plist': "text.plist.xml", ".nib": "wrapper.nib", ".xib": "text.xib", } # Used in PBXNativeTarget elements PRODUCT_TYPE_APPLICATION = 'com.apple.product-type.application' PRODUCT_TYPE_FRAMEWORK = 'com.apple.product-type.framework' PRODUCT_TYPE_EXECUTABLE = 'com.apple.product-type.tool' PRODUCT_TYPE_LIB_STATIC = 'com.apple.product-type.library.static' PRODUCT_TYPE_LIB_DYNAMIC = 'com.apple.product-type.library.dynamic' PRODUCT_TYPE_EXTENSION = 'com.apple.product-type.kernel-extension' PRODUCT_TYPE_IOKIT = 'com.apple.product-type.kernel-extension.iokit' # Used in PBXFileReference elements FILE_TYPE_APPLICATION = 'wrapper.cfbundle' FILE_TYPE_FRAMEWORK = 'wrapper.framework' FILE_TYPE_LIB_DYNAMIC = 'compiled.mach-o.dylib' FILE_TYPE_LIB_STATIC = 'archive.ar' FILE_TYPE_EXECUTABLE = 'compiled.mach-o.executable' # Tuple packs of the above TARGET_TYPE_FRAMEWORK = (PRODUCT_TYPE_FRAMEWORK, FILE_TYPE_FRAMEWORK, '.framework') TARGET_TYPE_APPLICATION = (PRODUCT_TYPE_APPLICATION, FILE_TYPE_APPLICATION, '.app') TARGET_TYPE_DYNAMIC_LIB = (PRODUCT_TYPE_LIB_DYNAMIC, FILE_TYPE_LIB_DYNAMIC, '.dylib') TARGET_TYPE_STATIC_LIB = (PRODUCT_TYPE_LIB_STATIC, FILE_TYPE_LIB_STATIC, '.a') TARGET_TYPE_EXECUTABLE = (PRODUCT_TYPE_EXECUTABLE, FILE_TYPE_EXECUTABLE, '') # Maps target type string to its data TARGET_TYPES = { 'framework': TARGET_TYPE_FRAMEWORK, 'app': TARGET_TYPE_APPLICATION, 'dylib': TARGET_TYPE_DYNAMIC_LIB, 'stlib': TARGET_TYPE_STATIC_LIB, 'exe' :TARGET_TYPE_EXECUTABLE, } def delete_invalid_values(dct): """ Deletes entries that are dictionaries or sets """ for k, v in list(dct.items()): if isinstance(v, dict) or isinstance(v, set): del dct[k] return dct """ Configuration of the global project settings. Sets an environment variable 'PROJ_CONFIGURATION' which is a dictionary of configuration name and buildsettings pair. E.g.: env.PROJ_CONFIGURATION = { 'Debug': { 'ARCHS': 'x86', ... } 'Release': { 'ARCHS' x86_64' ... } } The user can define a completely customized dictionary in configure() stage. Otherwise a default Debug/Release will be created based on env variable """ def configure(self): if not self.env.PROJ_CONFIGURATION: self.to_log("A default project configuration was created since no custom one was given in the configure(conf) stage. Define your custom project settings by adding PROJ_CONFIGURATION to env. 
The env.PROJ_CONFIGURATION must be a dictionary with at least one key, where each key is the configuration name, and the value is a dictionary of key/value settings.\n") # Check for any added config files added by the tool 'c_config'. if 'cfg_files' in self.env: self.env.INCLUDES = Utils.to_list(self.env.INCLUDES) + [os.path.abspath(os.path.dirname(f)) for f in self.env.cfg_files] # Create default project configuration? if 'PROJ_CONFIGURATION' not in self.env: defaults = delete_invalid_values(self.env.get_merged_dict()) self.env.PROJ_CONFIGURATION = { "Debug": defaults, "Release": defaults, } # Some build settings are required to be present by XCode. We will supply default values # if user hasn't defined any. defaults_required = [('PRODUCT_NAME', '$(TARGET_NAME)')] for cfgname,settings in self.env.PROJ_CONFIGURATION.items(): for default_var, default_val in defaults_required: if default_var not in settings: settings[default_var] = default_val # Error check customization if not isinstance(self.env.PROJ_CONFIGURATION, dict): raise Errors.ConfigurationError("The env.PROJ_CONFIGURATION must be a dictionary with at least one key, where each key is the configuration name, and the value is a dictionary of key/value settings.") part1 = 0 part2 = 10000 part3 = 0 id = 562000999 def newid(): global id id += 1 return "%04X%04X%04X%012d" % (0, 10000, 0, id) """ Represents a tree node in the XCode project plist file format. When written to a file, all attributes of XCodeNode are stringified together with its value. However, attributes starting with an underscore _ are ignored during that process and allows you to store arbitrary values that are not supposed to be written out. """ class XCodeNode(object): def __init__(self): self._id = newid() self._been_written = False def tostring(self, value): if isinstance(value, dict): result = "{\n" for k,v in value.items(): result = result + "\t\t\t%s = %s;\n" % (k, self.tostring(v)) result = result + "\t\t}" return result elif isinstance(value, str): return "\"%s\"" % value elif isinstance(value, list): result = "(\n" for i in value: result = result + "\t\t\t%s,\n" % self.tostring(i) result = result + "\t\t)" return result elif isinstance(value, XCodeNode): return value._id else: return str(value) def write_recursive(self, value, file): if isinstance(value, dict): for k,v in value.items(): self.write_recursive(v, file) elif isinstance(value, list): for i in value: self.write_recursive(i, file) elif isinstance(value, XCodeNode): value.write(file) def write(self, file): if not self._been_written: self._been_written = True for attribute,value in self.__dict__.items(): if attribute[0] != '_': self.write_recursive(value, file) w = file.write w("\t%s = {\n" % self._id) w("\t\tisa = %s;\n" % self.__class__.__name__) for attribute,value in self.__dict__.items(): if attribute[0] != '_': w("\t\t%s = %s;\n" % (attribute, self.tostring(value))) w("\t};\n\n") # Configurations class XCBuildConfiguration(XCodeNode): def __init__(self, name, settings = {}, env=None): XCodeNode.__init__(self) self.baseConfigurationReference = "" self.buildSettings = settings self.name = name if env and env.ARCH: settings['ARCHS'] = " ".join(env.ARCH) class XCConfigurationList(XCodeNode): def __init__(self, configlst): """ :param configlst: list of XCConfigurationList """ XCodeNode.__init__(self) self.buildConfigurations = configlst self.defaultConfigurationIsVisible = 0 self.defaultConfigurationName = configlst and configlst[0].name or "" # Group/Files class PBXFileReference(XCodeNode): def 
# Configurations
class XCBuildConfiguration(XCodeNode):
	def __init__(self, name, settings = {}, env=None):
		XCodeNode.__init__(self)
		self.baseConfigurationReference = ""
		self.buildSettings = settings
		self.name = name
		if env and env.ARCH:
			settings['ARCHS'] = " ".join(env.ARCH)

class XCConfigurationList(XCodeNode):
	def __init__(self, configlst):
		""" :param configlst: list of XCBuildConfiguration objects """
		XCodeNode.__init__(self)
		self.buildConfigurations = configlst
		self.defaultConfigurationIsVisible = 0
		self.defaultConfigurationName = configlst and configlst[0].name or ""

# Group/Files
class PBXFileReference(XCodeNode):
	def __init__(self, name, path, filetype = '', sourcetree = "SOURCE_ROOT"):
		XCodeNode.__init__(self)
		self.fileEncoding = 4
		if not filetype:
			_, ext = os.path.splitext(name)
			filetype = MAP_EXT.get(ext, 'text')
		self.lastKnownFileType = filetype
		self.explicitFileType = filetype
		self.name = name
		self.path = path
		self.sourceTree = sourcetree

	def __hash__(self):
		return (self.path + self.name).__hash__()

	def __eq__(self, other):
		return (self.path, self.name) == (other.path, other.name)

class PBXBuildFile(XCodeNode):
	""" This element indicates a file reference that is used in a PBXBuildPhase (either as an include or resource). """
	def __init__(self, fileRef, settings={}):
		XCodeNode.__init__(self)

		# fileRef is a reference to a PBXFileReference object
		self.fileRef = fileRef

		# A map of key/value pairs for additional settings.
		self.settings = settings

	def __hash__(self):
		return (self.fileRef).__hash__()

	def __eq__(self, other):
		return self.fileRef == other.fileRef

class PBXGroup(XCodeNode):
	def __init__(self, name, sourcetree = 'SOURCE_TREE'):
		XCodeNode.__init__(self)
		self.children = []
		self.name = name
		self.sourceTree = sourcetree

		# Maintain a lookup table for all PBXFileReferences
		# that are contained in this group.
		self._filerefs = {}

	def add(self, sources):
		"""
		Add a list of PBXFileReferences to this group

		:param sources: list of PBXFileReference objects
		"""
		self._filerefs.update(dict(zip(sources, sources)))
		self.children.extend(sources)

	def get_sub_groups(self):
		"""
		Returns all child PBXGroup objects contained in this group
		"""
		return list(filter(lambda x: isinstance(x, PBXGroup), self.children))

	def find_fileref(self, fileref):
		"""
		Recursively search this group for an existing PBXFileReference. Returns None
		if none was found.

		The reason you'd want to reuse existing PBXFileReferences from a PBXGroup is that
		XCode doesn't like PBXFileReferences that aren't part of a PBXGroup hierarchy.
		If a reference isn't, the consequence is that certain UI features like
		'Reveal in Finder' stop working.
		"""
		if fileref in self._filerefs:
			return self._filerefs[fileref]
		elif self.children:
			for childgroup in self.get_sub_groups():
				f = childgroup.find_fileref(fileref)
				if f:
					return f
		return None

class PBXContainerItemProxy(XCodeNode):
	""" This is the element used to decorate a target item. """
	def __init__(self, containerPortal, remoteGlobalIDString, remoteInfo='', proxyType=1):
		XCodeNode.__init__(self)
		self.containerPortal = containerPortal # PBXProject
		self.remoteGlobalIDString = remoteGlobalIDString # PBXNativeTarget
		self.remoteInfo = remoteInfo # Target name
		self.proxyType = proxyType

class PBXTargetDependency(XCodeNode):
	""" This is the element for referencing other targets through content proxies. """
	def __init__(self, native_target, proxy):
		XCodeNode.__init__(self)
		self.target = native_target
		self.targetProxy = proxy
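# How the two elements above are combined (see PBXProject.create_target_dependency()
# further below); a rough sketch, with 'project' and the two targets standing in
# for real instances:
#
#   proxy = PBXContainerItemProxy(project, other_target, 'OtherTarget')
#   dep = PBXTargetDependency(other_target, proxy)
#   this_target.add_dependency(dep)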
class PBXFrameworksBuildPhase(XCodeNode):
	""" This is the element for the framework link build phase, i.e. linking to frameworks """
	def __init__(self, pbxbuildfiles):
		XCodeNode.__init__(self)
		self.buildActionMask = 2147483647
		self.runOnlyForDeploymentPostprocessing = 0
		self.files = pbxbuildfiles # List of PBXBuildFile (.o, .framework, .dylib)

class PBXHeadersBuildPhase(XCodeNode):
	""" This is the element for adding header files to be packaged into the .framework """
	def __init__(self, pbxbuildfiles):
		XCodeNode.__init__(self)
		self.buildActionMask = 2147483647
		self.runOnlyForDeploymentPostprocessing = 0
		self.files = pbxbuildfiles # List of PBXBuildFile (header files)

class PBXCopyFilesBuildPhase(XCodeNode):
	"""
	Represents the PBXCopyFilesBuildPhase section. PBXBuildFile
	can be added to this node to copy files after the build is done.
	"""
	def __init__(self, pbxbuildfiles, dstpath, dstSubpathSpec=0, *args, **kwargs):
		XCodeNode.__init__(self)
		self.files = pbxbuildfiles
		self.dstPath = dstpath
		self.dstSubfolderSpec = dstSubpathSpec

class PBXSourcesBuildPhase(XCodeNode):
	""" Represents the 'Compile Sources' build phase in an Xcode target """
	def __init__(self, buildfiles):
		XCodeNode.__init__(self)
		self.files = buildfiles # List of PBXBuildFile objects

class PBXLegacyTarget(XCodeNode):
	def __init__(self, action, target=''):
		XCodeNode.__init__(self)
		self.buildConfigurationList = XCConfigurationList([XCBuildConfiguration('waf', {})])
		if not target:
			self.buildArgumentsString = "%s %s" % (sys.argv[0], action)
		else:
			self.buildArgumentsString = "%s %s --targets=%s" % (sys.argv[0], action, target)
		self.buildPhases = []
		self.buildToolPath = sys.executable
		self.buildWorkingDirectory = ""
		self.dependencies = []
		self.name = target or action
		self.productName = target or action
		self.passBuildSettingsInEnvironment = 0

class PBXShellScriptBuildPhase(XCodeNode):
	def __init__(self, action, target):
		XCodeNode.__init__(self)
		self.buildActionMask = 2147483647
		self.files = []
		self.inputPaths = []
		self.outputPaths = []
		self.runOnlyForDeploymentPostprocessing = 0
		self.shellPath = "/bin/sh"
		self.shellScript = "%s %s %s --targets=%s" % (sys.executable, sys.argv[0], action, target)

class PBXNativeTarget(XCodeNode):
	""" Represents a target in XCode, e.g. App, DyLib, Framework etc. """
	def __init__(self, target, node, target_type=TARGET_TYPE_APPLICATION, configlist=[], buildphases=[]):
		XCodeNode.__init__(self)
		product_type = target_type[0]
		file_type = target_type[1]

		self.buildConfigurationList = XCConfigurationList(configlist)
		self.buildPhases = buildphases
		self.buildRules = []
		self.dependencies = []
		self.name = target
		self.productName = target
		self.productType = product_type # See the TARGET_TYPE_ tuple constants
		self.productReference = PBXFileReference(node.name, node.abspath(), file_type, '')

	def add_configuration(self, cf):
		""" :type cf: XCBuildConfiguration """
		self.buildConfigurationList.buildConfigurations.append(cf)

	def add_build_phase(self, phase):
		# Some build phase types may appear only once. If a phase type already exists, then merge them.
		if ((phase.__class__ == PBXFrameworksBuildPhase)
			or (phase.__class__ == PBXSourcesBuildPhase)):
			for b in self.buildPhases:
				if b.__class__ == phase.__class__:
					b.files.extend(phase.files)
					return
		self.buildPhases.append(phase)

	def add_dependency(self, depnd):
		self.dependencies.append(depnd)
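# A small sketch of the merge behaviour of add_build_phase() above: phases of
# the "appear once" types collapse into a single phase (f1 and f2 here stand
# for PBXBuildFile instances):
#
#   t.add_build_phase(PBXSourcesBuildPhase([f1]))
#   t.add_build_phase(PBXSourcesBuildPhase([f2]))
#   # t.buildPhases now holds one PBXSourcesBuildPhase whose files are [f1, f2]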
# Root project object
class PBXProject(XCodeNode):
	def __init__(self, name, version, env):
		XCodeNode.__init__(self)

		if not isinstance(env.PROJ_CONFIGURATION, dict):
			raise Errors.WafError("Error: env.PROJ_CONFIGURATION must be a dictionary. This is done for you if you do not define one yourself. However, did you load the xcode module at the end of your wscript configure()?")

		# Retrieve project configuration
		configurations = []
		for config_name, settings in env.PROJ_CONFIGURATION.items():
			cf = XCBuildConfiguration(config_name, settings)
			configurations.append(cf)

		self.buildConfigurationList = XCConfigurationList(configurations)
		self.compatibilityVersion = version[0]
		self.hasScannedForEncodings = 1
		self.mainGroup = PBXGroup(name)
		self.projectRoot = ""
		self.projectDirPath = ""
		self.targets = []
		self._objectVersion = version[1]

	def create_target_dependency(self, target, name):
		""" :param target: PBXNativeTarget """
		proxy = PBXContainerItemProxy(self, target, name)
		dependency = PBXTargetDependency(target, proxy)
		return dependency

	def write(self, file):
		# Make sure this is written only once
		if self._been_written:
			return

		w = file.write
		w("// !$*UTF8*$!\n")
		w("{\n")
		w("\tarchiveVersion = 1;\n")
		w("\tclasses = {\n")
		w("\t};\n")
		w("\tobjectVersion = %d;\n" % self._objectVersion)
		w("\tobjects = {\n\n")

		XCodeNode.write(self, file)

		w("\t};\n")
		w("\trootObject = %s;\n" % self._id)
		w("}\n")

	def add_target(self, target):
		self.targets.append(target)

	def get_target(self, name):
		""" Get a reference to a PBXNativeTarget if it exists """
		for t in self.targets:
			if t.name == name:
				return t
		return None
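# For orientation, PBXProject.write() above produces a project.pbxproj of this
# shape (abridged; 46 is the objectVersion passed in by xcode.execute() below):
#
#   // !$*UTF8*$!
#   {
#       archiveVersion = 1;
#       classes = {
#       };
#       objectVersion = 46;
#       objects = {
#           ... one block per XCodeNode, as sketched earlier ...
#       };
#       rootObject = <id of the PBXProject node>;
#   }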
@TaskGen.feature('c', 'cxx')
@TaskGen.after('propagate_uselib_vars', 'apply_incpaths')
def process_xcode(self):
	bld = self.bld
	try:
		p = bld.project
	except AttributeError:
		return

	if not hasattr(self, 'target_type'):
		return

	products_group = bld.products_group

	target_group = PBXGroup(self.name)
	p.mainGroup.children.append(target_group)

	# Determine what type to build - framework, app bundle etc.
	target_type = getattr(self, 'target_type', 'app')
	if target_type not in TARGET_TYPES:
		raise Errors.WafError("Target type '%s' does not exist. Available options are '%s'. In target '%s'" % (target_type, "', '".join(TARGET_TYPES.keys()), self.name))
	else:
		target_type = TARGET_TYPES[target_type]
	file_ext = target_type[2]

	# Create the output node
	target_node = self.path.find_or_declare(self.name + file_ext)
	target = PBXNativeTarget(self.name, target_node, target_type, [], [])

	products_group.children.append(target.productReference)

	# Pull source files from the 'source' attribute and assign them to a UI group.
	# Use a default UI group named 'Source' unless the user
	# provides a 'group_files' dictionary to customize the UI grouping.
	sources = getattr(self, 'source', [])
	if hasattr(self, 'group_files'):
		group_files = getattr(self, 'group_files', [])
		for grpname, files in group_files.items():
			group = bld.create_group(grpname, files)
			target_group.children.append(group)
	else:
		group = bld.create_group('Source', sources)
		target_group.children.append(group)

	# Create a PBXFileReference for each source file.
	# If the source file already exists as a PBXFileReference in any of the UI groups, then
	# reuse that PBXFileReference object (XCode does not like it if we don't reuse).
	for idx, path in enumerate(sources):
		fileref = PBXFileReference(path.name, path.abspath())
		existing_fileref = target_group.find_fileref(fileref)
		if existing_fileref:
			sources[idx] = existing_fileref
		else:
			sources[idx] = fileref

	# If the 'source' attribute contains any file extension that XCode can't work with,
	# then remove it. The allowed file extensions are defined in XCODE_EXTS.
	is_valid_file_extension = lambda file: os.path.splitext(file.path)[1] in XCODE_EXTS
	sources = list(filter(is_valid_file_extension, sources))

	buildfiles = [bld.unique_buildfile(PBXBuildFile(x)) for x in sources]
	target.add_build_phase(PBXSourcesBuildPhase(buildfiles))

	# Check if any framework to link against is some other target we've made
	libs = getattr(self, 'tmp_use_seen', [])
	for lib in libs:
		use_target = p.get_target(lib)
		if use_target:
			# Create an XCode dependency so that XCode knows to build the other target before this target
			dependency = p.create_target_dependency(use_target, use_target.name)
			target.add_dependency(dependency)

			buildphase = PBXFrameworksBuildPhase([PBXBuildFile(use_target.productReference)])
			target.add_build_phase(buildphase)
			if lib in self.env.LIB:
				self.env.LIB = list(filter(lambda x: x != lib, self.env.LIB))

	# If 'export_headers' is present, add files to the Headers build phase in xcode.
	# These are files that'll get packed into the Framework for instance.
	exp_hdrs = getattr(self, 'export_headers', [])
	hdrs = bld.as_nodes(Utils.to_list(exp_hdrs))
	files = [p.mainGroup.find_fileref(PBXFileReference(n.name, n.abspath())) for n in hdrs]
	files = [PBXBuildFile(f, {'ATTRIBUTES': ('Public',)}) for f in files]
	buildphase = PBXHeadersBuildPhase(files)
	target.add_build_phase(buildphase)

	# Merge frameworks and libs into one list, and prefix the frameworks
	frameworks = Utils.to_list(self.env.FRAMEWORK)
	frameworks = ' '.join(['-framework %s' % (f.split('.framework')[0]) for f in frameworks])

	libs = Utils.to_list(self.env.STLIB) + Utils.to_list(self.env.LIB)
	libs = ' '.join(bld.env['STLIB_ST'] % t for t in libs)

	# Override target specific build settings
	bldsettings = {
		'HEADER_SEARCH_PATHS': ['$(inherited)'] + self.env['INCPATHS'],
		'LIBRARY_SEARCH_PATHS': ['$(inherited)'] + Utils.to_list(self.env.LIBPATH) + Utils.to_list(self.env.STLIBPATH) + Utils.to_list(self.env.LIBDIR),
		'FRAMEWORK_SEARCH_PATHS': ['$(inherited)'] + Utils.to_list(self.env.FRAMEWORKPATH),
		'OTHER_LDFLAGS': libs + ' ' + frameworks,
		'OTHER_LIBTOOLFLAGS': bld.env['LINKFLAGS'],
		'OTHER_CPLUSPLUSFLAGS': Utils.to_list(self.env['CXXFLAGS']),
		'OTHER_CFLAGS': Utils.to_list(self.env['CFLAGS']),
		'INSTALL_PATH': []
	}

	# Install path
	installpaths = Utils.to_list(getattr(self, 'install', []))
	prodbuildfile = PBXBuildFile(target.productReference)
	for instpath in installpaths:
		bldsettings['INSTALL_PATH'].append(instpath)
		target.add_build_phase(PBXCopyFilesBuildPhase([prodbuildfile], instpath))

	if not bldsettings['INSTALL_PATH']:
		del bldsettings['INSTALL_PATH']

	# Create build settings which can override the project settings. Defaults to none if the
	# user did not pass an argument. This will be filled up with target specific
	# search paths, libs to link etc.
	settings = getattr(self, 'settings', {})

	# The keys represent different build configurations, e.g. Debug, Release and so on.
	# Insert our generated build settings into all configuration names.
	keys = set(settings.keys()) | set(bld.env.PROJ_CONFIGURATION.keys())
	for k in keys:
		if k in settings:
			settings[k].update(bldsettings)
		else:
			settings[k] = bldsettings

	for k, v in settings.items():
		target.add_configuration(XCBuildConfiguration(k, v))

	p.add_target(target)
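# A sketch of the task generator attributes consumed by process_xcode() above,
# as they might appear in a wscript build(bld); all values are illustrative,
# and bld.framework is one of the functions bound at the bottom of this file:
#
#   def build(bld):
#       bld.framework(
#           source='src/lib.cpp',
#           target='MyLib',
#           group_files={'Sources': ['src/lib.cpp'], 'Headers': ['src/lib.h']},
#           export_headers='src/lib.h',
#           install='@executable_path/../Frameworks',
#           settings={'Debug': {'ONLY_ACTIVE_ARCH': 'YES'}})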
class xcode(Build.BuildContext):
	cmd = 'xcode6'
	fun = 'build'

	def as_nodes(self, files):
		""" Returns a list of waflib.Node objects from a list of file path strings """
		nodes = []
		for x in files:
			if not isinstance(x, str):
				d = x
			else:
				d = self.srcnode.find_node(x)
			if not d:
				raise Errors.WafError("File '%s' was not found" % x)
			nodes.append(d)
		return nodes

	def create_group(self, name, files):
		"""
		Returns a new PBXGroup containing the files (paths) passed in the files arg
		:type files: string or list of strings
		"""
		group = PBXGroup(name)
		"""
		Do not use unique file references here, since XCode seems to allow only one
		file reference to be referenced by a group.
		"""
		files_ = []
		for d in self.as_nodes(Utils.to_list(files)):
			fileref = PBXFileReference(d.name, d.abspath())
			files_.append(fileref)
		group.add(files_)
		return group

	def unique_buildfile(self, buildfile):
		"""
		Returns a unique buildfile, possibly an existing one.
		Use this after you've constructed a PBXBuildFile to make sure there is
		only one PBXBuildFile for the same file in the same project.
		"""
		try:
			build_files = self.build_files
		except AttributeError:
			build_files = self.build_files = {}

		if buildfile not in build_files:
			build_files[buildfile] = buildfile
		return build_files[buildfile]

	def execute(self):
		"""
		Entry point
		"""
		self.restore()
		if not self.all_envs:
			self.load_envs()
		self.recurse([self.run_dir])

		appname = getattr(Context.g_module, Context.APPNAME, os.path.basename(self.srcnode.abspath()))

		p = PBXProject(appname, ('Xcode 3.2', 46), self.env)

		# If we don't create a Products group, then
		# XCode will create one, which entails that
		# we'll start to see duplicate files in the UI
		# for some reason.
		products_group = PBXGroup('Products')
		p.mainGroup.children.append(products_group)

		self.project = p
		self.products_group = products_group

		# Post all task generators;
		# the process_xcode method above will be called for each target.
		if self.targets and self.targets != '*':
			(self._min_grp, self._exact_tg) = self.get_targets()

		self.current_group = 0
		while self.current_group < len(self.groups):
			self.post_group()
			self.current_group += 1

		node = self.bldnode.make_node('%s.xcodeproj' % appname)
		node.mkdir()
		node = node.make_node('project.pbxproj')
		with open(node.abspath(), 'w') as f:
			p.write(f)
		Logs.pprint('GREEN', 'Wrote %r' % node.abspath())
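# Typical invocation, assuming a wscript that loads this tool in its
# configure stage:
#
#   $ ./waf configure xcode6
#
# which writes <APPNAME>.xcodeproj/project.pbxproj into the build directory.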
def bind_fun(tgtype):
	def fun(self, *k, **kw):
		tgtype = fun.__name__
		if tgtype == 'shlib' or tgtype == 'dylib':
			features = 'cxx cxxshlib'
			tgtype = 'dylib'
		elif tgtype == 'framework':
			features = 'cxx cxxshlib'
			tgtype = 'framework'
		elif tgtype == 'program':
			features = 'cxx cxxprogram'
			tgtype = 'exe'
		elif tgtype == 'app':
			features = 'cxx cxxprogram'
			tgtype = 'app'
		elif tgtype == 'stlib':
			features = 'cxx cxxstlib'
			tgtype = 'stlib'
		lst = kw['features'] = Utils.to_list(kw.get('features', []))
		for x in features.split():
			if not x in kw['features']:
				lst.append(x)
		kw['target_type'] = tgtype
		return self(*k, **kw)
	fun.__name__ = tgtype
	setattr(Build.BuildContext, tgtype, fun)
	return fun

for xx in 'app framework dylib shlib stlib program'.split():
	bind_fun(xx)

tdb-1.4.2/third_party/waf/waflib/fixpy2.py0000660000000000000000000000263513444661622020406 0ustar rootroot00000000000000#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2010-2018 (ita)

from __future__ import with_statement

import os

all_modifs = {}

def fixdir(dir):
	"""Call all substitution functions on Waf folders"""
	for k in all_modifs:
		for v in all_modifs[k]:
			modif(os.path.join(dir, 'waflib'), k, v)

def modif(dir, name, fun):
	"""Call a substitution function"""
	if name == '*':
		lst = []
		for y in '. Tools extras'.split():
			for x in os.listdir(os.path.join(dir, y)):
				if x.endswith('.py'):
					lst.append(y + os.sep + x)
		for x in lst:
			modif(dir, x, fun)
		return

	filename = os.path.join(dir, name)
	with open(filename, 'r') as f:
		txt = f.read()

	txt = fun(txt)

	with open(filename, 'w') as f:
		f.write(txt)

def subst(*k):
	"""register a substitution function"""
	def do_subst(fun):
		for x in k:
			try:
				all_modifs[x].append(fun)
			except KeyError:
				all_modifs[x] = [fun]
		return fun
	return do_subst

@subst('*')
def r1(code):
	"utf-8 fixes for python < 2.6"
	code = code.replace('as e:', ',e:')
	code = code.replace(".decode(sys.stdout.encoding or'latin-1',errors='replace')", '')
	return code.replace('.encode()', '')

@subst('Runner.py')
def r4(code):
	"generator syntax"
	return code.replace('next(self.biter)', 'self.biter.next()')

@subst('Context.py')
def r5(code):
	return code.replace("('Execution failure: %s'%str(e),ex=e)", "('Execution failure: %s'%str(e),ex=e),None,sys.exc_info()[2]")
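# A small sketch of registering an extra substitution with the decorator
# above; the file name and replacement here are only illustrative:
#
#   @subst('Node.py')
#   def r_example(code):
#       return code.replace("'rb'", "'r'")
#
# Calling fixdir('/path/to/unpacked/waf') then rewrites waflib/Node.py in place.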
tdb-1.4.2/third_party/waf/waflib/processor.py0000770000000000000000000000310113527011455021176 0ustar rootroot00000000000000#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2016-2018 (ita)

import os, sys, traceback, base64, signal
try:
	import cPickle
except ImportError:
	import pickle as cPickle

try:
	import subprocess32 as subprocess
except ImportError:
	import subprocess

try:
	TimeoutExpired = subprocess.TimeoutExpired
except AttributeError:
	class TimeoutExpired(Exception):
		pass

def run():
	txt = sys.stdin.readline().strip()
	if not txt:
		# parent process probably ended
		sys.exit(1)
	[cmd, kwargs, cargs] = cPickle.loads(base64.b64decode(txt))
	cargs = cargs or {}

	if not 'close_fds' in kwargs:
		# workers have no fds
		kwargs['close_fds'] = False

	ret = 1
	out, err, ex, trace = (None, None, None, None)
	try:
		proc = subprocess.Popen(cmd, **kwargs)
		try:
			out, err = proc.communicate(**cargs)
		except TimeoutExpired:
			if kwargs.get('start_new_session') and hasattr(os, 'killpg'):
				os.killpg(proc.pid, signal.SIGKILL)
			else:
				proc.kill()
			out, err = proc.communicate()
			exc = TimeoutExpired(proc.args, timeout=cargs['timeout'], output=out)
			exc.stderr = err
			raise exc
		ret = proc.returncode
	except Exception as e:
		exc_type, exc_value, tb = sys.exc_info()
		exc_lines = traceback.format_exception(exc_type, exc_value, tb)
		trace = str(cmd) + '\n' + ''.join(exc_lines)
		ex = e.__class__.__name__

	# it is just text so maybe we do not need to pickle()
	tmp = [ret, out, err, ex, trace]
	obj = base64.b64encode(cPickle.dumps(tmp))
	sys.stdout.write(obj.decode())
	sys.stdout.write('\n')
	sys.stdout.flush()

while 1:
	try:
		run()
	except KeyboardInterrupt:
		break
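# Protocol sketch for driving this worker from a parent process (the PIPE
# wiring below is illustrative; in Waf itself the parent side lives in
# waflib/Utils.py): the parent writes one base64(pickle([cmd, kwargs, cargs]))
# line to stdin, and reads back one base64(pickle([ret, out, err, ex, trace]))
# line from stdout.
#
#   import base64, pickle, subprocess, sys
#   worker = subprocess.Popen([sys.executable, 'processor.py'],
#                             stdin=subprocess.PIPE, stdout=subprocess.PIPE)
#   req = base64.b64encode(pickle.dumps([['echo', 'hello'], {'stdout': subprocess.PIPE}, {}]))
#   worker.stdin.write(req + b'\n')
#   worker.stdin.flush()
#   ret, out, err, ex, trace = pickle.loads(base64.b64decode(worker.stdout.readline()))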